diff --git a/Gopkg.lock b/Gopkg.lock index c0f1eb298..b4cb036cd 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -2,66 +2,89 @@ [[projects]] + digest = "1:4999763e72e2903ac7eea05a58d13443c69696bfd1935b426e9cc11e2d07ff2a" name = "cloud.google.com/go" - packages = ["compute/metadata"] - revision = "20d4028b8a750c2aca76bf9fefa8ed2d0109b573" - version = "v0.19.0" + packages = [ + "compute/metadata", + "iam", + "kms/apiv1", + ] + pruneopts = "NUT" + revision = "0ebda48a7f143b1cce9eb37a8c1106ac762a3430" + version = "v0.34.0" [[projects]] branch = "master" + digest = "1:707ebe952a8b3d00b343c01536c79c73771d100f63ec6babeaed5c79e2b8a8dd" name = "github.com/beorn7/perks" packages = ["quantile"] + pruneopts = "NUT" revision = "3a771d992973f24aa725d07868b467d1ddfceafb" [[projects]] + digest = "1:94ffc0947c337d618b6ff5ed9abaddc1217b090c1b3a1ae4739b35b7b25851d5" name = "github.com/container-storage-interface/spec" packages = ["lib/go/csi"] + pruneopts = "NUT" revision = "ed0bb0e1557548aa028307f48728767cfe8f6345" version = "v1.0.0" [[projects]] + digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" name = "github.com/davecgh/go-spew" packages = ["spew"] + pruneopts = "NUT" revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" version = "v1.1.1" [[projects]] branch = "master" + digest = "1:4ddc17aeaa82cb18c5f0a25d7c253a10682f518f4b2558a82869506eec223d76" name = "github.com/docker/distribution" packages = [ "digestset", - "reference" + "reference", ] + pruneopts = "NUT" revision = "f3adfea35b77b16b5bce0727f520b16d9d5e5f7c" [[projects]] + digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756" name = "github.com/ghodss/yaml" packages = ["."] + pruneopts = "NUT" revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" version = "v1.0.0" [[projects]] + digest = "1:8679b8a64f3613e9749c5640c3535c83399b8e69f67ce54d91dc73f6d77373af" name = "github.com/gogo/protobuf" packages = [ "proto", - "sortkeys" + "sortkeys", ] + pruneopts = "NUT" revision = "636bf0302bc95575d69441b25a2603156ffdddf1" version = "v1.1.1" [[projects]] branch = "master" + digest = "1:e2b86e41f3d669fc36b50d31d32d22c8ac656c75aa5ea89717ce7177e134ff2a" name = "github.com/golang/glog" packages = ["."] + pruneopts = "NUT" revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" [[projects]] branch = "master" + digest = "1:3fb07f8e222402962fa190eb060608b34eddfb64562a18e2167df2de0ece85d8" name = "github.com/golang/groupcache" packages = ["lru"] + pruneopts = "NUT" revision = "6f2cf27854a4a29e3811b0371547be335d411b8b" [[projects]] + digest = "1:6d7d17c24963f3790b4d244e40ac97139ae4d13764744e9594f2922dd9363c00" name = "github.com/golang/protobuf" packages = [ "proto", @@ -69,98 +92,136 @@ "ptypes", "ptypes/any", "ptypes/duration", + "ptypes/struct", "ptypes/timestamp", - "ptypes/wrappers" + "ptypes/wrappers", ] + pruneopts = "NUT" revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" version = "v1.1.0" [[projects]] branch = "master" + digest = "1:05f95ffdfcf651bdb0f05b40b69e7f5663047f8da75c72d58728acb59b5cc107" name = "github.com/google/btree" packages = ["."] + pruneopts = "NUT" revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306" [[projects]] branch = "master" + digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc" name = "github.com/google/gofuzz" packages = ["."] + pruneopts = "NUT" revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" [[projects]] + digest = 
"1:fd4d1f4c2d75aee3833ee7d8ef11fcf42ddec3c63d1819548288c3d868d6eb14" + name = "github.com/googleapis/gax-go" + packages = [ + ".", + "v2", + ] + pruneopts = "NUT" + revision = "c8a15bac9b9fe955bd9f900272f9a306465d28cf" + version = "v2.0.3" + +[[projects]] + digest = "1:06a7dadb7b760767341ffb6c8d377238d68a1226f2b21b5d497d2e3f6ecf6b4e" name = "github.com/googleapis/gnostic" packages = [ "OpenAPIv2", "compiler", - "extensions" + "extensions", ] + pruneopts = "NUT" revision = "7c663266750e7d82587642f65e60bc4083f1f84e" version = "v0.2.0" [[projects]] branch = "master" + digest = "1:7fdf3223c7372d1ced0b98bf53457c5e89d89aecbad9a77ba9fcc6e01f9e5621" name = "github.com/gregjones/httpcache" packages = [ ".", - "diskcache" + "diskcache", ] + pruneopts = "NUT" revision = "9cad4c3443a7200dd6400aef47183728de563a38" [[projects]] branch = "master" + digest = "1:f0d9d74edbd40fdeada436d5ac9cb5197407899af3fef85ff0137077ffe8ae19" name = "github.com/hashicorp/errwrap" packages = ["."] + pruneopts = "NUT" revision = "7554cd9344cec97297fa6649b055a8c98c2a1e55" [[projects]] branch = "master" + digest = "1:4d55897d00e9b53c1c716e8fe504de3d21bec8edba0a330bfaa87902695e46e2" name = "github.com/hashicorp/go-multierror" packages = ["."] + pruneopts = "NUT" revision = "b7773ae218740a7be65057fc60b366a49b538a44" [[projects]] + digest = "1:b42cde0e1f3c816dd57f57f7bbcf05ca40263ad96f168714c130c611fc0856a6" name = "github.com/hashicorp/golang-lru" packages = [ ".", - "simplelru" + "simplelru", ] + pruneopts = "NUT" revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768" version = "v0.5.0" [[projects]] + digest = "1:8e36686e8b139f8fe240c1d5cf3a145bc675c22ff8e707857cdd3ae17b00d728" name = "github.com/json-iterator/go" packages = ["."] + pruneopts = "NUT" revision = "1624edc4454b8682399def8740d46db5e4362ba4" version = "v1.1.5" [[projects]] + digest = "1:009cfbba4123d60ecb41433258ab87dcb8511a1f6c8000222c16cee223f7620a" name = "github.com/kubernetes-csi/csi-test" packages = [ "pkg/sanity", - "utils" + "utils", ] + pruneopts = "NUT" revision = "619da6853e10bef67ddcc8f1c2b68b73154bf11d" version = "v1.0.0-rc2" [[projects]] + digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6" name = "github.com/matttproud/golang_protobuf_extensions" packages = ["pbutil"] + pruneopts = "NUT" revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" version = "v1.0.1" [[projects]] + digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f" name = "github.com/modern-go/concurrent" packages = ["."] + pruneopts = "NUT" revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" version = "1.0.3" [[projects]] + digest = "1:c6aca19413b13dc59c220ad7430329e2ec454cc310bc6d8de2c7e2b93c18a0f6" name = "github.com/modern-go/reflect2" packages = ["."] + pruneopts = "NUT" revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" version = "1.0.1" [[projects]] + digest = "1:044fe5b2b6dbdc0f41090e86466c27f480e7af860f425708b723008616625915" name = "github.com/onsi/ginkgo" packages = [ ".", @@ -180,12 +241,14 @@ "reporters/stenographer", "reporters/stenographer/support/go-colorable", "reporters/stenographer/support/go-isatty", - "types" + "types", ] + pruneopts = "NUT" revision = "9eda700730cba42af70d53180f9dcce9266bc2bc" version = "v1.4.0" [[projects]] + digest = "1:feed851b93f9debbe8180092067c4e29a51c06dbb5fe2f643ec9feea039670a3" name = "github.com/onsi/gomega" packages = [ ".", @@ -199,93 +262,138 @@ 
"matchers/support/goraph/edge", "matchers/support/goraph/node", "matchers/support/goraph/util", - "types" + "types", ] + pruneopts = "NUT" revision = "003f63b7f4cff3fc95357005358af2de0f5fe152" version = "v1.3.0" [[projects]] + digest = "1:e0cc8395ea893c898ff5eb0850f4d9851c1f57c78c232304a026379a47a552d0" name = "github.com/opencontainers/go-digest" packages = ["."] + pruneopts = "NUT" revision = "279bed98673dd5bef374d3b6e4b09e2af76183bf" version = "v1.0.0-rc1" [[projects]] + digest = "1:cce3a18fb0b96b5015cd8ca03a57d20a662679de03c4dc4b6ff5f17ea2050fa6" name = "github.com/pborman/uuid" packages = ["."] + pruneopts = "NUT" revision = "e790cca94e6cc75c7064b1332e63811d4aae1a53" version = "v1.1" [[projects]] branch = "master" + digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2" name = "github.com/petar/GoLLRB" packages = ["llrb"] + pruneopts = "NUT" revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4" [[projects]] + digest = "1:6c6d91dc326ed6778783cff869c49fb2f61303cdd2ebbcf90abe53505793f3b6" name = "github.com/peterbourgon/diskv" packages = ["."] + pruneopts = "NUT" revision = "5f041e8faa004a95c88a202771f4cc3e991971e6" version = "v2.0.1" [[projects]] + digest = "1:3e5fd795ebf6a9e13e67d644da76130af7a6003286531f9573f8074c228b66a3" name = "github.com/prometheus/client_golang" packages = ["prometheus"] + pruneopts = "NUT" revision = "c5b7fccd204277076155f10851dad72b76a49317" version = "v0.8.0" [[projects]] branch = "master" + digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" name = "github.com/prometheus/client_model" packages = ["go"] + pruneopts = "NUT" revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" [[projects]] branch = "master" + digest = "1:fad5a35eea6a1a33d6c8f949fbc146f24275ca809ece854248187683f52cc30b" name = "github.com/prometheus/common" packages = [ "expfmt", "internal/bitbucket.org/ww/goautoneg", - "model" + "model", ] + pruneopts = "NUT" revision = "c7de2306084e37d54b8be01f3541a8464345e9a5" [[projects]] branch = "master" + digest = "1:26a2f5e891cc4d2321f18a0caa84c8e788663c17bed6a487f3cbe2c4295292d0" name = "github.com/prometheus/procfs" packages = [ ".", "internal/util", "nfs", - "xfs" + "xfs", ] + pruneopts = "NUT" revision = "418d78d0b9a7b7de3a6bbc8a23def624cc977bb2" [[projects]] + digest = "1:6989062eb7ccf25cf38bf4fe3dba097ee209f896cda42cefdca3927047bef7b6" name = "github.com/sirupsen/logrus" packages = ["."] + pruneopts = "NUT" revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc" version = "v1.0.5" [[projects]] + digest = "1:e3707aeaccd2adc89eba6c062fec72116fe1fc1ba71097da85b4d8ae1668a675" name = "github.com/spf13/pflag" packages = ["."] + pruneopts = "NUT" revision = "9a97c102cda95a86cec2345a6f09f55a939babf5" version = "v1.0.2" +[[projects]] + digest = "1:c0ae5a4135d67428c0afa2453859735d387f8383b5435731b0acfaa6cb61a887" + name = "go.opencensus.io" + packages = [ + "internal", + "internal/tagencoding", + "plugin/ocgrpc", + "plugin/ochttp", + "plugin/ochttp/propagation/b3", + "stats", + "stats/internal", + "stats/view", + "tag", + "trace", + "trace/internal", + "trace/propagation", + ] + pruneopts = "NUT" + revision = "0095aec66ae14801c6711210f6f0716411cefdd3" + version = "v0.8.0" + [[projects]] branch = "master" + digest = "1:8e241498e35f550e5192ee6b1f6ff2c0a7ffe81feff9541d297facffe1383979" name = "golang.org/x/crypto" packages = [ "ed25519", "ed25519/internal/edwards25519", "pbkdf2", - "ssh/terminal" + "ssh/terminal", ] 
+ pruneopts = "NUT" revision = "a49355c7e3f8fe157a85be2f77e6e269a0f89602" [[projects]] branch = "master" + digest = "1:a2398fc4d55f760ff5189b8c72be50688a408d1323cb36ea04d1e787c229d0de" name = "golang.org/x/net" packages = [ "context", @@ -298,32 +406,38 @@ "idna", "internal/timeseries", "lex/httplex", - "trace" + "trace", ] + pruneopts = "NUT" revision = "e0c57d8f86c17f0724497efcb3bc617e82834821" [[projects]] branch = "master" + digest = "1:9a0eeb972e14851a3a0490280d736de3b1c686745c7a4d660642ec2706a446e2" name = "golang.org/x/oauth2" packages = [ ".", "google", "internal", "jws", - "jwt" + "jwt", ] + pruneopts = "NUT" revision = "fdc9e635145ae97e6c2cb777c48305600cf515cb" [[projects]] branch = "master" + digest = "1:e0ab12559b9422309159f6ba4f6d1e419f85a4fb257384b26ddea9fb4e372d18" name = "golang.org/x/sys" packages = [ "unix", - "windows" + "windows", ] + pruneopts = "NUT" revision = "89ac7f292d17a339edab4de8efcba5d8672ff661" [[projects]] + digest = "1:8a12cbc891b7130d3f660f8a309e5c0b083f831e6ac38cdaa1f12e63c12d6bea" name = "golang.org/x/text" packages = [ "collate", @@ -351,19 +465,23 @@ "unicode/bidi", "unicode/cldr", "unicode/norm", - "unicode/rangetable" + "unicode/rangetable", ] + pruneopts = "NUT" revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" version = "v0.3.0" [[projects]] branch = "master" + digest = "1:c9e7a4b4d47c0ed205d257648b0e5b0440880cb728506e318f8ac7cd36270bc4" name = "golang.org/x/time" packages = ["rate"] + pruneopts = "NUT" revision = "fbb02b2291d28baffd63558aa44b4b56f178d650" [[projects]] branch = "master" + digest = "1:e08ce1dada421fa30ae0405d23709982d47b22e1719b44f4e61d779b6c769317" name = "google.golang.org/api" packages = [ "cloudresourcemanager/v1", @@ -372,11 +490,21 @@ "compute/v1", "gensupport", "googleapi", - "googleapi/internal/uritemplates" + "googleapi/internal/uritemplates", + "googleapi/transport", + "internal", + "iterator", + "option", + "transport", + "transport/grpc", + "transport/http", + "transport/http/internal/propagation", ] + pruneopts = "NUT" revision = "583d854617af4d2080b5d2a24d72f7fc5a128ab2" [[projects]] + digest = "1:626ac4e70ef18262989f8c52503259109e1a2e5580d23aeae0f0e0349819dade" name = "google.golang.org/appengine" packages = [ ".", @@ -387,19 +515,31 @@ "internal/log", "internal/modules", "internal/remote_api", + "internal/socket", "internal/urlfetch", - "urlfetch" + "socket", + "urlfetch", ] + pruneopts = "NUT" revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a" version = "v1.0.0" [[projects]] branch = "master" + digest = "1:2448cadd3307c12aaf4f7079a658bb3685a5c165cf8f83553e4d84e365d58735" name = "google.golang.org/genproto" - packages = ["googleapis/rpc/status"] - revision = "df60624c1e9b9d2973e889c7a1cff73155da81c4" + packages = [ + "googleapis/api/annotations", + "googleapis/cloud/kms/v1", + "googleapis/iam/v1", + "googleapis/rpc/status", + "protobuf/field_mask", + ] + pruneopts = "NUT" + revision = "ae2f86662275e140f395167f1dab7081a5bd5fa8" [[projects]] + digest = "1:9a0b80a7b0b7105f322f3edb5e3ad45c474179c0bc423fb9b73210a4e9bab229" name = "google.golang.org/grpc" packages = [ ".", @@ -409,6 +549,7 @@ "codes", "connectivity", "credentials", + "credentials/oauth", "encoding", "encoding/proto", "grpclb/grpc_lb_v1/messages", @@ -424,36 +565,44 @@ "stats", "status", "tap", - "transport" + "transport", ] + pruneopts = "NUT" revision = "8e4536a86ab602859c20df5ebfd0bd4228d08655" version = "v1.10.0" [[projects]] + digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" name = "gopkg.in/inf.v0" packages = ["."] 
+ pruneopts = "NUT" revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" version = "v0.9.1" [[projects]] + digest = "1:812f9446bc99ebd1c66c55fa456ff7843f7105d22f11f0a2098bced37e9c6d32" name = "gopkg.in/square/go-jose.v2" packages = [ ".", "cipher", "json", - "jwt" + "jwt", ] + pruneopts = "NUT" revision = "ef984e69dd356202fd4e4910d4d9c24468bdf0b8" version = "v2.1.9" [[projects]] + digest = "1:b92c04db63b4a79baff7afe06e0ee1076383a16834f8b8a90dc04644651f5315" name = "gopkg.in/yaml.v2" packages = ["."] + pruneopts = "NUT" revision = "7f97868eec74b32b0982dd158a51a446d1da7eb5" version = "v2.1.1" [[projects]] branch = "master" + digest = "1:c383bbc5389d57cd091cb98f5e06b52fb6bf46a305d08ee8c8f60229473f6136" name = "k8s.io/api" packages = [ "admissionregistration/v1alpha1", @@ -484,18 +633,22 @@ "settings/v1alpha1", "storage/v1", "storage/v1alpha1", - "storage/v1beta1" + "storage/v1beta1", ] + pruneopts = "NUT" revision = "a191abe0b71e00ce4cde58af8002aa4c1a8bb068" [[projects]] branch = "master" + digest = "1:aff2ac035da399e2ccd6d58a0c484f87a52c82868b652397727c2ce1a3aba7f0" name = "k8s.io/apiextensions-apiserver" packages = ["pkg/features"] + pruneopts = "NUT" revision = "c0b566b8903bc2b6e3f4e41d35b3ae9e5d170c30" [[projects]] branch = "release-1.11" + digest = "1:ebcd64e347d12260cc614a1907dc894f828118def8fcf90c7733b87fd349016c" name = "k8s.io/apimachinery" packages = [ "pkg/api/equality", @@ -542,23 +695,27 @@ "pkg/version", "pkg/watch", "third_party/forked/golang/json", - "third_party/forked/golang/reflect" + "third_party/forked/golang/reflect", ] + pruneopts = "NUT" revision = "def12e63c512da17043b4f0293f52d1006603d9f" [[projects]] branch = "master" + digest = "1:e74fbedce0f81942543ce4077e9f84914579d910c18300e68eb367f927e00e9e" name = "k8s.io/apiserver" packages = [ "pkg/authentication/authenticator", "pkg/authentication/serviceaccount", "pkg/authentication/user", "pkg/features", - "pkg/util/feature" + "pkg/util/feature", ] + pruneopts = "NUT" revision = "adf6303733d5c01d05921f9809e4b702f89b689d" [[projects]] + digest = "1:a0b064d78c636e562a38272e4f5c3a93a51238b76ad788ad2f27e4709a987dc4" name = "k8s.io/client-go" packages = [ "discovery", @@ -678,18 +835,22 @@ "util/connrotation", "util/flowcontrol", "util/integer", - "util/retry" + "util/retry", ] + pruneopts = "NUT" revision = "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" version = "v8.0.0" [[projects]] branch = "master" + digest = "1:a2c842a1e0aed96fd732b535514556323a6f5edfded3b63e5e0ab1bce188aa54" name = "k8s.io/kube-openapi" packages = ["pkg/util/proto"] + pruneopts = "NUT" revision = "c45f27d6c6f59f29099e552cc6bb6119539e0559" [[projects]] + digest = "1:c188a199dfdcfdff2fa79a6ecbd9226e92bb2849cb34bf4449e2ac20cdf81896" name = "k8s.io/kubernetes" packages = [ "pkg/api/legacyscheme", @@ -737,30 +898,64 @@ "pkg/volume/util/fs", "pkg/volume/util/recyclerclient", "pkg/volume/util/types", - "pkg/volume/util/volumepathhandler" + "pkg/volume/util/volumepathhandler", ] + pruneopts = "NUT" revision = "b1b29978270dc22fecc592ac55d903350454310a" version = "v1.11.1" [[projects]] branch = "master" + digest = "1:fcdea6d5a476946968ddc4a301b8f8cdbae4521ead9e556b7399f077cd4871b5" name = "k8s.io/test-infra" packages = [ "boskos/client", "boskos/common", - "boskos/storage" + "boskos/storage", ] + pruneopts = "NUT" revision = "b5f298b6851f4db24587301840e7b7630747d315" [[projects]] branch = "master" + digest = "1:867d98a27033c52150eb4c01ca0f9be938d3010e9a4909ea3a881a83c3ac1b3f" name = "k8s.io/utils" packages = ["exec"] + pruneopts = "NUT" revision = 
"258e2a2fa64568210fbd6267cf1d8fd87c3cb86e" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "bfe3f0172a04a4e023d6885c701c4c13c81889b15be232669eec1ef74148fae7" + input-imports = [ + "cloud.google.com/go/compute/metadata", + "cloud.google.com/go/kms/apiv1", + "github.com/container-storage-interface/spec/lib/go/csi", + "github.com/golang/glog", + "github.com/golang/protobuf/ptypes", + "github.com/kubernetes-csi/csi-test/pkg/sanity", + "github.com/onsi/ginkgo", + "github.com/onsi/gomega", + "golang.org/x/net/context", + "golang.org/x/oauth2", + "golang.org/x/oauth2/google", + "google.golang.org/api/cloudresourcemanager/v1", + "google.golang.org/api/compute/v0.beta", + "google.golang.org/api/compute/v1", + "google.golang.org/api/googleapi", + "google.golang.org/genproto/googleapis/cloud/kms/v1", + "google.golang.org/grpc", + "google.golang.org/grpc/codes", + "google.golang.org/grpc/status", + "k8s.io/apimachinery/pkg/util/sets", + "k8s.io/apimachinery/pkg/util/uuid", + "k8s.io/apimachinery/pkg/util/wait", + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta", + "k8s.io/kubernetes/pkg/util/mount", + "k8s.io/kubernetes/pkg/volume/util", + "k8s.io/test-infra/boskos/client", + "k8s.io/test-infra/boskos/common", + "k8s.io/utils/exec", + ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 4331ab0ad..12aa2eddb 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -26,7 +26,7 @@ [[constraint]] name = "cloud.google.com/go" - version = "0.19.0" + version = "0.34.0" [[constraint]] name = "github.com/container-storage-interface/spec" diff --git a/test/e2e/tests/multi_zone_e2e_test.go b/test/e2e/tests/multi_zone_e2e_test.go index 4ed6959e0..77c9f77fe 100644 --- a/test/e2e/tests/multi_zone_e2e_test.go +++ b/test/e2e/tests/multi_zone_e2e_test.go @@ -15,6 +15,7 @@ limitations under the License. 
 package tests

 import (
+	"fmt"
 	"path/filepath"
 	"strings"

@@ -143,68 +144,96 @@ var _ = Describe("GCE PD CSI Driver Multi-Zone", func() {

 })

-func testAttachWriteReadDetach(volID string, volName string, instance *remote.InstanceInfo, client *remote.CsiClient, readOnly bool) {
+func testAttachWriteReadDetach(volID string, volName string, instance *remote.InstanceInfo, client *remote.CsiClient, readOnly bool) error {
 	var err error
 	glog.Infof("Starting testAttachWriteReadDetach with volume %v node %v with readonly %v\n", volID, instance.GetNodeID(), readOnly)

 	// Attach Disk
 	err = client.ControllerPublishVolume(volID, instance.GetNodeID())
-	Expect(err).To(BeNil(), "ControllerPublishVolume failed with error for disk %v on node %v", volID, instance.GetNodeID())
+	if err != nil {
+		return fmt.Errorf("ControllerPublishVolume failed with error for disk %v on node %v: %v", volID, instance.GetNodeID(), err)
+	}

 	defer func() {
-		// Detach Disk
 		err = client.ControllerUnpublishVolume(volID, instance.GetNodeID())
-		Expect(err).To(BeNil(), "ControllerUnpublishVolume failed with error")
+		if err != nil {
+			glog.Errorf("Failed to detach disk: %v", err)
+		}
+
 	}()

 	// Stage Disk
 	stageDir := filepath.Join("/tmp/", volName, "stage")
-	client.NodeStageVolume(volID, stageDir)
-	Expect(err).To(BeNil(), "NodeStageVolume failed with error")
+	err = client.NodeStageVolume(volID, stageDir)
+	if err != nil {
+		return fmt.Errorf("NodeStageVolume failed with error: %v", err)
+	}

 	defer func() {
 		// Unstage Disk
 		err = client.NodeUnstageVolume(volID, stageDir)
-		Expect(err).To(BeNil(), "NodeUnstageVolume failed with error")
-		err = testutils.RmAll(instance, filepath.Join("/tmp/", volName))
-		Expect(err).To(BeNil(), "Failed to remove temp directory")
+		if err != nil {
+			glog.Errorf("Failed to unstage volume: %v", err)
+		}
+		fp := filepath.Join("/tmp/", volName)
+		err = testutils.RmAll(instance, fp)
+		if err != nil {
+			glog.Errorf("Failed to rm file path %s: %v", fp, err)
+		}
 	}()

 	// Mount Disk
 	publishDir := filepath.Join("/tmp/", volName, "mount")
 	err = client.NodePublishVolume(volID, stageDir, publishDir)
-	Expect(err).To(BeNil(), "NodePublishVolume failed with error")
+	if err != nil {
+		return fmt.Errorf("NodePublishVolume failed with error: %v", err)
+	}
 	err = testutils.ForceChmod(instance, filepath.Join("/tmp/", volName), "777")
-	Expect(err).To(BeNil(), "Chmod failed with error")
+	if err != nil {
+		return fmt.Errorf("Chmod failed with error: %v", err)
+	}
 	testFileContents := "test"
 	if !readOnly {
 		// Write a file
 		testFile := filepath.Join(publishDir, "testfile")
 		err = testutils.WriteFile(instance, testFile, testFileContents)
-		Expect(err).To(BeNil(), "Failed to write file")
+		if err != nil {
+			return fmt.Errorf("Failed to write file: %v", err)
+		}
 	}

 	// Unmount Disk
 	err = client.NodeUnpublishVolume(volID, publishDir)
-	Expect(err).To(BeNil(), "NodeUnpublishVolume failed with error")
+	if err != nil {
+		return fmt.Errorf("NodeUnpublishVolume failed with error: %v", err)
+	}

 	// Mount disk somewhere else
 	secondPublishDir := filepath.Join("/tmp/", volName, "secondmount")
 	err = client.NodePublishVolume(volID, stageDir, secondPublishDir)
-	Expect(err).To(BeNil(), "NodePublishVolume failed with error")
+	if err != nil {
+		return fmt.Errorf("NodePublishVolume failed with error: %v", err)
+	}
 	err = testutils.ForceChmod(instance, filepath.Join("/tmp/", volName), "777")
-	Expect(err).To(BeNil(), "Chmod failed with error")
+	if err != nil {
+		return fmt.Errorf("Chmod failed with error: %v", err)
+	}

 	// Read File
 	secondTestFile := filepath.Join(secondPublishDir, "testfile")
 	readContents, err := testutils.ReadFile(instance, secondTestFile)
-	Expect(err).To(BeNil(), "ReadFile failed with error")
+	if err != nil {
+		return fmt.Errorf("ReadFile failed with error: %v", err)
+	}
 	Expect(strings.TrimSpace(string(readContents))).To(Equal(testFileContents))

 	// Unmount Disk
 	err = client.NodeUnpublishVolume(volID, secondPublishDir)
-	Expect(err).To(BeNil(), "NodeUnpublishVolume failed with error")
+	if err != nil {
+		return fmt.Errorf("NodeUnpublishVolume failed with error: %v", err)
+	}

 	glog.Infof("Completed testAttachWriteReadDetach with volume %v node %v\n", volID, instance.GetNodeID())
+	return nil
 }
diff --git a/test/e2e/tests/setup_e2e_test.go b/test/e2e/tests/setup_e2e_test.go
index 884a5539c..e458a9f42 100644
--- a/test/e2e/tests/setup_e2e_test.go
+++ b/test/e2e/tests/setup_e2e_test.go
@@ -15,12 +15,14 @@ limitations under the License.
 package tests

 import (
+	"context"
 	"flag"
 	"fmt"
 	"math/rand"
 	"testing"
 	"time"

+	cloudkms "cloud.google.com/go/kms/apiv1"
 	"github.com/golang/glog"
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -39,6 +41,7 @@ var (
 	testContexts = []*remote.TestContext{}
 	computeService *compute.Service
 	betaComputeService *computebeta.Service
+	kmsClient *cloudkms.KeyManagementClient
 )

 func TestE2E(t *testing.T) {
@@ -62,6 +65,10 @@ var _ = BeforeSuite(func() {
 	betaComputeService, err = remote.GetBetaComputeClient()
 	Expect(err).To(BeNil())

+	// Create the KMS client.
+	kmsClient, err = cloudkms.NewKeyManagementClient(context.Background())
+	Expect(err).To(BeNil())
+
 	if *runInProw {
 		*project, *serviceAccount = testutils.SetupProwConfig("gce-project")
 	}
diff --git a/test/e2e/tests/single_zone_e2e_test.go b/test/e2e/tests/single_zone_e2e_test.go
index f168b0ca4..bc956cc61 100644
--- a/test/e2e/tests/single_zone_e2e_test.go
+++ b/test/e2e/tests/single_zone_e2e_test.go
@@ -15,6 +15,8 @@ limitations under the License.
 package tests

 import (
+	"context"
+	"fmt"
 	"strings"
 	"time"

@@ -26,6 +28,10 @@ import (
 	csi "github.com/container-storage-interface/spec/lib/go/csi"
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
+
+	"google.golang.org/api/iterator"
+	kmspb "google.golang.org/genproto/googleapis/cloud/kms/v1"
+	fieldmask "google.golang.org/genproto/protobuf/field_mask"
 )

 const (
@@ -78,7 +84,8 @@ var _ = Describe("GCE PD CSI Driver", func() {
 		}()

 		// Attach Disk
-		testAttachWriteReadDetach(volID, volName, instance, client, false /* readOnly */)
+		err = testAttachWriteReadDetach(volID, volName, instance, client, false /* readOnly */)
+		Expect(err).To(BeNil(), "Failed to go through volume lifecycle")

 	})

@@ -152,7 +159,8 @@ var _ = Describe("GCE PD CSI Driver", func() {
 		}()

 		// Attach Disk
-		testAttachWriteReadDetach(underSpecifiedID, volName, instance, client, false /* readOnly */)
+		err = testAttachWriteReadDetach(underSpecifiedID, volName, instance, client, false /* readOnly */)
+		Expect(err).To(BeNil(), "Failed to go through volume lifecycle")

 	})

@@ -295,6 +303,160 @@ var _ = Describe("GCE PD CSI Driver", func() {
 		}()
 	})

+	It("Should create CMEK key, go through volume lifecycle, validate behavior on key revoke and restore", func() {
+		ctx := context.Background()
+		Expect(testContexts).ToNot(BeEmpty())
+		testContext := getRandomTestContext()
+
+		controllerInstance := testContext.Instance
+		controllerClient := testContext.Client
+
+		p, z, _ := controllerInstance.GetIdentity()
+		locationID := "global"
+
+		// The resource name of the key rings.
+		parentName := fmt.Sprintf("projects/%s/locations/%s", p, locationID)
+		keyRingId := "gce-pd-csi-test-ring"
+
+		// Create KeyRing
+		ringReq := &kmspb.CreateKeyRingRequest{
+			Parent: parentName,
+			KeyRingId: keyRingId,
+		}
+		keyRing, err := kmsClient.CreateKeyRing(ctx, ringReq)
+		if !gce.IsGCEError(err, "alreadyExists") {
+			getKeyRingReq := &kmspb.GetKeyRingRequest{
+				Name: fmt.Sprintf("%s/keyRings/%s", parentName, keyRingId),
+			}
+			keyRing, err = kmsClient.GetKeyRing(ctx, getKeyRingReq)
+
+		}
+		Expect(err).To(BeNil(), "Failed to create or get key ring %v", keyRingId)
+
+		// Create CryptoKey in KeyRing
+		keyId := "test-key-" + string(uuid.NewUUID())
+		keyReq := &kmspb.CreateCryptoKeyRequest{
+			Parent: keyRing.Name,
+			CryptoKeyId: keyId,
+			CryptoKey: &kmspb.CryptoKey{
+				Purpose: kmspb.CryptoKey_ENCRYPT_DECRYPT,
+				VersionTemplate: &kmspb.CryptoKeyVersionTemplate{
+					Algorithm: kmspb.CryptoKeyVersion_GOOGLE_SYMMETRIC_ENCRYPTION,
+				},
+			},
+		}
+		key, err := kmsClient.CreateCryptoKey(ctx, keyReq)
+		Expect(err).To(BeNil(), "Failed to create crypto key %v in key ring %v", keyId, keyRing.Name)
+
+		keyVersions := []string{}
+		keyVersionReq := &kmspb.ListCryptoKeyVersionsRequest{
+			Parent: key.Name,
+		}
+
+		it := kmsClient.ListCryptoKeyVersions(ctx, keyVersionReq)
+
+		for {
+			keyVersion, err := it.Next()
+			if err == iterator.Done {
+				break
+			}
+			Expect(err).To(BeNil(), "Failed to list crypto key versions")
+
+			keyVersions = append(keyVersions, keyVersion.Name)
+		}
+
+		// Defer deletion of all key versions
+		// https://cloud.google.com/kms/docs/destroy-restore
+		defer func() {
+
+			for _, keyVersion := range keyVersions {
+				destroyKeyReq := &kmspb.DestroyCryptoKeyVersionRequest{
+					Name: keyVersion,
+				}
+				_, err = kmsClient.DestroyCryptoKeyVersion(ctx, destroyKeyReq)
+				Expect(err).To(BeNil(), "Failed to destroy crypto key version: %v", keyVersion)
+			}
+
+		}()
+
+		// Go through volume lifecycle using CMEK-ed PD
+		// Create Disk
+		volName := testNamePrefix + string(uuid.NewUUID())
+		volID, err := controllerClient.CreateVolume(volName, map[string]string{
+			common.ParameterKeyDiskEncryptionKmsKey: key.Name,
+		}, defaultSizeGb,
+			&csi.TopologyRequirement{
+				Requisite: []*csi.Topology{
+					{
+						Segments: map[string]string{common.TopologyKeyZone: z},
+					},
+				},
+			})
+		Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err)
+
+		// Validate Disk Created
+		cloudDisk, err := computeService.Disks.Get(p, z, volName).Do()
+		Expect(err).To(BeNil(), "Could not get disk from cloud directly")
+		Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType))
+		Expect(cloudDisk.Status).To(Equal(readyState))
+		Expect(cloudDisk.SizeGb).To(Equal(defaultSizeGb))
+		Expect(cloudDisk.Name).To(Equal(volName))
+
+		defer func() {
+			// Delete Disk
+			err = controllerClient.DeleteVolume(volID)
+			Expect(err).To(BeNil(), "DeleteVolume failed")
+
+			// Validate Disk Deleted
+			_, err = computeService.Disks.Get(p, z, volName).Do()
+			Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found")
+		}()
+
+		// Test disk works
+		err = testAttachWriteReadDetach(volID, volName, controllerInstance, controllerClient, false /* readOnly */)
+		Expect(err).To(BeNil(), "Failed to go through volume lifecycle before revoking CMEK key")
+
+		// Revoke CMEK key
+		// https://cloud.google.com/kms/docs/enable-disable
+
+		for _, keyVersion := range keyVersions {
+			disableReq := &kmspb.UpdateCryptoKeyVersionRequest{
+				CryptoKeyVersion: &kmspb.CryptoKeyVersion{
+					Name: keyVersion,
+					State: kmspb.CryptoKeyVersion_DISABLED,
+				},
UpdateMask: &fieldmask.FieldMask{ + Paths: []string{"state"}, + }, + } + _, err = kmsClient.UpdateCryptoKeyVersion(ctx, disableReq) + Expect(err).To(BeNil(), "Failed to disable crypto key") + } + + // Make sure attach of PD fails + err = testAttachWriteReadDetach(volID, volName, controllerInstance, controllerClient, false /* readOnly */) + Expect(err).ToNot(BeNil(), "Volume lifecycle should have failed, but succeeded") + + // Restore CMEK key + for _, keyVersion := range keyVersions { + enableReq := &kmspb.UpdateCryptoKeyVersionRequest{ + CryptoKeyVersion: &kmspb.CryptoKeyVersion{ + Name: keyVersion, + State: kmspb.CryptoKeyVersion_ENABLED, + }, + UpdateMask: &fieldmask.FieldMask{ + Paths: []string{"state"}, + }, + } + _, err = kmsClient.UpdateCryptoKeyVersion(ctx, enableReq) + Expect(err).To(BeNil(), "Failed to enable crypto key") + } + + // Make sure attach of PD succeeds + err = testAttachWriteReadDetach(volID, volName, controllerInstance, controllerClient, false /* readOnly */) + Expect(err).To(BeNil(), "Failed to go through volume lifecycle after restoring CMEK key") + }) + It("Should create and delete snapshot for RePD in two zones ", func() { Expect(testContexts).ToNot(BeEmpty()) testContext := getRandomTestContext() diff --git a/vendor/cloud.google.com/go/CONTRIBUTORS b/vendor/cloud.google.com/go/CONTRIBUTORS index eb55275bf..3b3cbed98 100644 --- a/vendor/cloud.google.com/go/CONTRIBUTORS +++ b/vendor/cloud.google.com/go/CONTRIBUTORS @@ -22,6 +22,7 @@ David Symonds Filippo Valsorda Glenn Lewis Ingo Oeser +James Hall Johan Euphrosine Jonathan Amsterdam Kunpei Sakai diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index e708c031b..0d929a619 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -1,4 +1,4 @@ -// Copyright 2014 Google Inc. All Rights Reserved. +// Copyright 2014 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -20,6 +20,7 @@ package metadata // import "cloud.google.com/go/compute/metadata" import ( + "context" "encoding/json" "fmt" "io/ioutil" @@ -31,9 +32,6 @@ import ( "strings" "sync" "time" - - "golang.org/x/net/context" - "golang.org/x/net/context/ctxhttp" ) const ( @@ -64,7 +62,7 @@ var ( ) var ( - metaClient = &http.Client{ + defaultClient = &Client{hc: &http.Client{ Transport: &http.Transport{ Dial: (&net.Dialer{ Timeout: 2 * time.Second, @@ -72,15 +70,15 @@ var ( }).Dial, ResponseHeaderTimeout: 2 * time.Second, }, - } - subscribeClient = &http.Client{ + }} + subscribeClient = &Client{hc: &http.Client{ Transport: &http.Transport{ Dial: (&net.Dialer{ Timeout: 2 * time.Second, KeepAlive: 30 * time.Second, }).Dial, }, - } + }} ) // NotDefinedError is returned when requested metadata is not defined. @@ -95,74 +93,16 @@ func (suffix NotDefinedError) Error() string { return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) } -// Get returns a value from the metadata service. -// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// -// If the GCE_METADATA_HOST environment variable is not defined, a default of -// 169.254.169.254 will be used instead. -// -// If the requested metadata is not defined, the returned error will -// be of type NotDefinedError. 
-func Get(suffix string) (string, error) { - val, _, err := getETag(metaClient, suffix) - return val, err -} - -// getETag returns a value from the metadata service as well as the associated -// ETag using the provided client. This func is otherwise equivalent to Get. -func getETag(client *http.Client, suffix string) (value, etag string, err error) { - // Using a fixed IP makes it very difficult to spoof the metadata service in - // a container, which is an important use-case for local testing of cloud - // deployments. To enable spoofing of the metadata service, the environment - // variable GCE_METADATA_HOST is first inspected to decide where metadata - // requests shall go. - host := os.Getenv(metadataHostEnv) - if host == "" { - // Using 169.254.169.254 instead of "metadata" here because Go - // binaries built with the "netgo" tag and without cgo won't - // know the search suffix for "metadata" is - // ".google.internal", and this IP address is documented as - // being stable anyway. - host = metadataIP - } - url := "http://" + host + "/computeMetadata/v1/" + suffix - req, _ := http.NewRequest("GET", url, nil) - req.Header.Set("Metadata-Flavor", "Google") - req.Header.Set("User-Agent", userAgent) - res, err := client.Do(req) - if err != nil { - return "", "", err - } - defer res.Body.Close() - if res.StatusCode == http.StatusNotFound { - return "", "", NotDefinedError(suffix) - } - if res.StatusCode != 200 { - return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) - } - all, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", "", err - } - return string(all), res.Header.Get("Etag"), nil -} - -func getTrimmed(suffix string) (s string, err error) { - s, err = Get(suffix) - s = strings.TrimSpace(s) - return -} - -func (c *cachedValue) get() (v string, err error) { +func (c *cachedValue) get(cl *Client) (v string, err error) { defer c.mu.Unlock() c.mu.Lock() if c.v != "" { return c.v, nil } if c.trim { - v, err = getTrimmed(c.k) + v, err = cl.getTrimmed(c.k) } else { - v, err = Get(c.k) + v, err = cl.Get(c.k) } if err == nil { c.v = v @@ -201,7 +141,7 @@ func testOnGCE() bool { go func() { req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) req.Header.Set("User-Agent", userAgent) - res, err := ctxhttp.Do(ctx, metaClient, req) + res, err := defaultClient.hc.Do(req.WithContext(ctx)) if err != nil { resc <- false return @@ -266,78 +206,183 @@ func systemInfoSuggestsGCE() bool { return name == "Google" || name == "Google Compute Engine" } -// Subscribe subscribes to a value from the metadata service. -// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// The suffix may contain query parameters. -// -// Subscribe calls fn with the latest metadata value indicated by the provided -// suffix. If the metadata value is deleted, fn is called with the empty string -// and ok false. Subscribe blocks until fn returns a non-nil error or the value -// is deleted. Subscribe returns the error value returned from the last call to -// fn, which may be nil when ok == false. +// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no +// ResponseHeaderTimeout). func Subscribe(suffix string, fn func(v string, ok bool) error) error { - const failedSubscribeSleep = time.Second * 5 + return subscribeClient.Subscribe(suffix, fn) +} - // First check to see if the metadata value exists at all. 
- val, lastETag, err := getETag(subscribeClient, suffix) - if err != nil { - return err - } +// Get calls Client.Get on the default client. +func Get(suffix string) (string, error) { return defaultClient.Get(suffix) } - if err := fn(val, true); err != nil { - return err +// ProjectID returns the current instance's project ID string. +func ProjectID() (string, error) { return defaultClient.ProjectID() } + +// NumericProjectID returns the current instance's numeric project ID. +func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } + +// InternalIP returns the instance's primary internal IP address. +func InternalIP() (string, error) { return defaultClient.InternalIP() } + +// ExternalIP returns the instance's primary external (public) IP address. +func ExternalIP() (string, error) { return defaultClient.ExternalIP() } + +// Hostname returns the instance's hostname. This will be of the form +// ".c..internal". +func Hostname() (string, error) { return defaultClient.Hostname() } + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } + +// InstanceID returns the current VM's numeric instance ID. +func InstanceID() (string, error) { return defaultClient.InstanceID() } + +// InstanceName returns the current VM's instance ID string. +func InstanceName() (string, error) { return defaultClient.InstanceName() } + +// Zone returns the current VM's zone, such as "us-central1-b". +func Zone() (string, error) { return defaultClient.Zone() } + +// InstanceAttributes calls Client.InstanceAttributes on the default client. +func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } + +// ProjectAttributes calls Client.ProjectAttributes on the default client. +func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } + +// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. +func InstanceAttributeValue(attr string) (string, error) { + return defaultClient.InstanceAttributeValue(attr) +} + +// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. +func ProjectAttributeValue(attr string) (string, error) { + return defaultClient.ProjectAttributeValue(attr) +} + +// Scopes calls Client.Scopes on the default client. +func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } + +func strsContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } } + return false +} - ok := true - if strings.ContainsRune(suffix, '?') { - suffix += "&wait_for_change=true&last_etag=" - } else { - suffix += "?wait_for_change=true&last_etag=" +// A Client provides metadata. +type Client struct { + hc *http.Client +} + +// NewClient returns a Client that can be used to fetch metadata. All HTTP requests +// will use the given http.Client instead of the default client. +func NewClient(c *http.Client) *Client { + return &Client{hc: c} +} + +// getETag returns a value from the metadata service as well as the associated ETag. +// This func is otherwise equivalent to Get. +func (c *Client) getETag(suffix string) (value, etag string, err error) { + // Using a fixed IP makes it very difficult to spoof the metadata service in + // a container, which is an important use-case for local testing of cloud + // deployments. 
To enable spoofing of the metadata service, the environment + // variable GCE_METADATA_HOST is first inspected to decide where metadata + // requests shall go. + host := os.Getenv(metadataHostEnv) + if host == "" { + // Using 169.254.169.254 instead of "metadata" here because Go + // binaries built with the "netgo" tag and without cgo won't + // know the search suffix for "metadata" is + // ".google.internal", and this IP address is documented as + // being stable anyway. + host = metadataIP } - for { - val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag)) - if err != nil { - if _, deleted := err.(NotDefinedError); !deleted { - time.Sleep(failedSubscribeSleep) - continue // Retry on other errors. - } - ok = false - } - lastETag = etag + url := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", url, nil) + req.Header.Set("Metadata-Flavor", "Google") + req.Header.Set("User-Agent", userAgent) + res, err := c.hc.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", "", NotDefinedError(suffix) + } + if res.StatusCode != 200 { + return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + return string(all), res.Header.Get("Etag"), nil +} - if err := fn(val, ok); err != nil || !ok { - return err - } +// Get returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +func (c *Client) Get(suffix string) (string, error) { + val, _, err := c.getETag(suffix) + return val, err +} + +func (c *Client) getTrimmed(suffix string) (s string, err error) { + s, err = c.Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *Client) lines(suffix string) ([]string, error) { + j, err := c.Get(suffix) + if err != nil { + return nil, err } + s := strings.Split(strings.TrimSpace(j), "\n") + for i := range s { + s[i] = strings.TrimSpace(s[i]) + } + return s, nil } // ProjectID returns the current instance's project ID string. -func ProjectID() (string, error) { return projID.get() } +func (c *Client) ProjectID() (string, error) { return projID.get(c) } // NumericProjectID returns the current instance's numeric project ID. -func NumericProjectID() (string, error) { return projNum.get() } +func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } + +// InstanceID returns the current VM's numeric instance ID. +func (c *Client) InstanceID() (string, error) { return instID.get(c) } // InternalIP returns the instance's primary internal IP address. -func InternalIP() (string, error) { - return getTrimmed("instance/network-interfaces/0/ip") +func (c *Client) InternalIP() (string, error) { + return c.getTrimmed("instance/network-interfaces/0/ip") } // ExternalIP returns the instance's primary external (public) IP address. -func ExternalIP() (string, error) { - return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") +func (c *Client) ExternalIP() (string, error) { + return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") } // Hostname returns the instance's hostname. This will be of the form // ".c..internal". 
-func Hostname() (string, error) { - return getTrimmed("instance/hostname") +func (c *Client) Hostname() (string, error) { + return c.getTrimmed("instance/hostname") } // InstanceTags returns the list of user-defined instance tags, // assigned when initially creating a GCE instance. -func InstanceTags() ([]string, error) { +func (c *Client) InstanceTags() ([]string, error) { var s []string - j, err := Get("instance/tags") + j, err := c.Get("instance/tags") if err != nil { return nil, err } @@ -347,14 +392,9 @@ func InstanceTags() ([]string, error) { return s, nil } -// InstanceID returns the current VM's numeric instance ID. -func InstanceID() (string, error) { - return instID.get() -} - // InstanceName returns the current VM's instance ID string. -func InstanceName() (string, error) { - host, err := Hostname() +func (c *Client) InstanceName() (string, error) { + host, err := c.Hostname() if err != nil { return "", err } @@ -362,8 +402,8 @@ func InstanceName() (string, error) { } // Zone returns the current VM's zone, such as "us-central1-b". -func Zone() (string, error) { - zone, err := getTrimmed("instance/zone") +func (c *Client) Zone() (string, error) { + zone, err := c.getTrimmed("instance/zone") // zone is of the form "projects//zones/". if err != nil { return "", err @@ -374,24 +414,12 @@ func Zone() (string, error) { // InstanceAttributes returns the list of user-defined attributes, // assigned when initially creating a GCE VM instance. The value of an // attribute can be obtained with InstanceAttributeValue. -func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") } +func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } // ProjectAttributes returns the list of user-defined attributes // applying to the project as a whole, not just this VM. The value of // an attribute can be obtained with ProjectAttributeValue. -func ProjectAttributes() ([]string, error) { return lines("project/attributes/") } - -func lines(suffix string) ([]string, error) { - j, err := Get(suffix) - if err != nil { - return nil, err - } - s := strings.Split(strings.TrimSpace(j), "\n") - for i := range s { - s[i] = strings.TrimSpace(s[i]) - } - return s, nil -} +func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } // InstanceAttributeValue returns the value of the provided VM // instance attribute. @@ -401,8 +429,8 @@ func lines(suffix string) ([]string, error) { // // InstanceAttributeValue may return ("", nil) if the attribute was // defined to be the empty string. -func InstanceAttributeValue(attr string) (string, error) { - return Get("instance/attributes/" + attr) +func (c *Client) InstanceAttributeValue(attr string) (string, error) { + return c.Get("instance/attributes/" + attr) } // ProjectAttributeValue returns the value of the provided @@ -413,25 +441,61 @@ func InstanceAttributeValue(attr string) (string, error) { // // ProjectAttributeValue may return ("", nil) if the attribute was // defined to be the empty string. -func ProjectAttributeValue(attr string) (string, error) { - return Get("project/attributes/" + attr) +func (c *Client) ProjectAttributeValue(attr string) (string, error) { + return c.Get("project/attributes/" + attr) } // Scopes returns the service account scopes for the given account. // The account may be empty or the string "default" to use the instance's // main account. 
-func Scopes(serviceAccount string) ([]string, error) { +func (c *Client) Scopes(serviceAccount string) ([]string, error) { if serviceAccount == "" { serviceAccount = "default" } - return lines("instance/service-accounts/" + serviceAccount + "/scopes") + return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") } -func strsContains(ss []string, s string) bool { - for _, v := range ss { - if v == s { - return true +// Subscribe subscribes to a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// The suffix may contain query parameters. +// +// Subscribe calls fn with the latest metadata value indicated by the provided +// suffix. If the metadata value is deleted, fn is called with the empty string +// and ok false. Subscribe blocks until fn returns a non-nil error or the value +// is deleted. Subscribe returns the error value returned from the last call to +// fn, which may be nil when ok == false. +func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error { + const failedSubscribeSleep = time.Second * 5 + + // First check to see if the metadata value exists at all. + val, lastETag, err := c.getETag(suffix) + if err != nil { + return err + } + + if err := fn(val, true); err != nil { + return err + } + + ok := true + if strings.ContainsRune(suffix, '?') { + suffix += "&wait_for_change=true&last_etag=" + } else { + suffix += "?wait_for_change=true&last_etag=" + } + for { + val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag)) + if err != nil { + if _, deleted := err.(NotDefinedError); !deleted { + time.Sleep(failedSubscribeSleep) + continue // Retry on other errors. + } + ok = false + } + lastETag = etag + + if err := fn(val, ok); err != nil || !ok { + return err } } - return false } diff --git a/vendor/cloud.google.com/go/iam/iam.go b/vendor/cloud.google.com/go/iam/iam.go new file mode 100644 index 000000000..76df63561 --- /dev/null +++ b/vendor/cloud.google.com/go/iam/iam.go @@ -0,0 +1,292 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package iam supports the resource-specific operations of Google Cloud +// IAM (Identity and Access Management) for the Google Cloud Libraries. +// See https://cloud.google.com/iam for more about IAM. +// +// Users of the Google Cloud Libraries will typically not use this package +// directly. Instead they will begin with some resource that supports IAM, like +// a pubsub topic, and call its IAM method to get a Handle for that resource. +package iam + +import ( + "context" + "time" + + gax "github.com/googleapis/gax-go" + pb "google.golang.org/genproto/googleapis/iam/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// client abstracts the IAMPolicy API to allow multiple implementations. 
+type client interface { + Get(ctx context.Context, resource string) (*pb.Policy, error) + Set(ctx context.Context, resource string, p *pb.Policy) error + Test(ctx context.Context, resource string, perms []string) ([]string, error) +} + +// grpcClient implements client for the standard gRPC-based IAMPolicy service. +type grpcClient struct { + c pb.IAMPolicyClient +} + +var withRetry = gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60 * time.Second, + Multiplier: 1.3, + }) +}) + +func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) { + var proto *pb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { + var err error + proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource}) + return err + }, withRetry) + if err != nil { + return nil, err + } + return proto, nil +} + +func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error { + return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { + _, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{ + Resource: resource, + Policy: p, + }) + return err + }, withRetry) +} + +func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) { + var res *pb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { + var err error + res, err = g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{ + Resource: resource, + Permissions: perms, + }) + return err + }, withRetry) + if err != nil { + return nil, err + } + return res.Permissions, nil +} + +// A Handle provides IAM operations for a resource. +type Handle struct { + c client + resource string +} + +// InternalNewHandle is for use by the Google Cloud Libraries only. +// +// InternalNewHandle returns a Handle for resource. +// The conn parameter refers to a server that must support the IAMPolicy service. +func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle { + return InternalNewHandleGRPCClient(pb.NewIAMPolicyClient(conn), resource) +} + +// InternalNewHandleGRPCClient is for use by the Google Cloud Libraries only. +// +// InternalNewHandleClient returns a Handle for resource using the given +// grpc service that implements IAM as a mixin +func InternalNewHandleGRPCClient(c pb.IAMPolicyClient, resource string) *Handle { + return InternalNewHandleClient(&grpcClient{c: c}, resource) +} + +// InternalNewHandleClient is for use by the Google Cloud Libraries only. +// +// InternalNewHandleClient returns a Handle for resource using the given +// client implementation. +func InternalNewHandleClient(c client, resource string) *Handle { + return &Handle{ + c: c, + resource: resource, + } +} + +// Policy retrieves the IAM policy for the resource. +func (h *Handle) Policy(ctx context.Context) (*Policy, error) { + proto, err := h.c.Get(ctx, h.resource) + if err != nil { + return nil, err + } + return &Policy{InternalProto: proto}, nil +} + +// SetPolicy replaces the resource's current policy with the supplied Policy. +// +// If policy was created from a prior call to Get, then the modification will +// only succeed if the policy has not changed since the Get. 
+func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error { + return h.c.Set(ctx, h.resource, policy.InternalProto) +} + +// TestPermissions returns the subset of permissions that the caller has on the resource. +func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) { + return h.c.Test(ctx, h.resource, permissions) +} + +// A RoleName is a name representing a collection of permissions. +type RoleName string + +// Common role names. +const ( + Owner RoleName = "roles/owner" + Editor RoleName = "roles/editor" + Viewer RoleName = "roles/viewer" +) + +const ( + // AllUsers is a special member that denotes all users, even unauthenticated ones. + AllUsers = "allUsers" + + // AllAuthenticatedUsers is a special member that denotes all authenticated users. + AllAuthenticatedUsers = "allAuthenticatedUsers" +) + +// A Policy is a list of Bindings representing roles +// granted to members. +// +// The zero Policy is a valid policy with no bindings. +type Policy struct { + // TODO(jba): when type aliases are available, put Policy into an internal package + // and provide an exported alias here. + + // This field is exported for use by the Google Cloud Libraries only. + // It may become unexported in a future release. + InternalProto *pb.Policy +} + +// Members returns the list of members with the supplied role. +// The return value should not be modified. Use Add and Remove +// to modify the members of a role. +func (p *Policy) Members(r RoleName) []string { + b := p.binding(r) + if b == nil { + return nil + } + return b.Members +} + +// HasRole reports whether member has role r. +func (p *Policy) HasRole(member string, r RoleName) bool { + return memberIndex(member, p.binding(r)) >= 0 +} + +// Add adds member member to role r if it is not already present. +// A new binding is created if there is no binding for the role. +func (p *Policy) Add(member string, r RoleName) { + b := p.binding(r) + if b == nil { + if p.InternalProto == nil { + p.InternalProto = &pb.Policy{} + } + p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{ + Role: string(r), + Members: []string{member}, + }) + return + } + if memberIndex(member, b) < 0 { + b.Members = append(b.Members, member) + return + } +} + +// Remove removes member from role r if it is present. +func (p *Policy) Remove(member string, r RoleName) { + bi := p.bindingIndex(r) + if bi < 0 { + return + } + bindings := p.InternalProto.Bindings + b := bindings[bi] + mi := memberIndex(member, b) + if mi < 0 { + return + } + // Order doesn't matter for bindings or members, so to remove, move the last item + // into the removed spot and shrink the slice. + if len(b.Members) == 1 { + // Remove binding. + last := len(bindings) - 1 + bindings[bi] = bindings[last] + bindings[last] = nil + p.InternalProto.Bindings = bindings[:last] + return + } + // Remove member. + // TODO(jba): worry about multiple copies of m? + last := len(b.Members) - 1 + b.Members[mi] = b.Members[last] + b.Members[last] = "" + b.Members = b.Members[:last] +} + +// Roles returns the names of all the roles that appear in the Policy. +func (p *Policy) Roles() []RoleName { + if p.InternalProto == nil { + return nil + } + var rns []RoleName + for _, b := range p.InternalProto.Bindings { + rns = append(rns, RoleName(b.Role)) + } + return rns +} + +// binding returns the Binding for the suppied role, or nil if there isn't one. 
+func (p *Policy) binding(r RoleName) *pb.Binding { + i := p.bindingIndex(r) + if i < 0 { + return nil + } + return p.InternalProto.Bindings[i] +} + +func (p *Policy) bindingIndex(r RoleName) int { + if p.InternalProto == nil { + return -1 + } + for i, b := range p.InternalProto.Bindings { + if b.Role == string(r) { + return i + } + } + return -1 +} + +// memberIndex returns the index of m in b's Members, or -1 if not found. +func memberIndex(m string, b *pb.Binding) int { + if b == nil { + return -1 + } + for i, mm := range b.Members { + if mm == m { + return i + } + } + return -1 +} diff --git a/vendor/cloud.google.com/go/kms/apiv1/doc.go b/vendor/cloud.google.com/go/kms/apiv1/doc.go new file mode 100644 index 000000000..2769e7f5d --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/doc.go @@ -0,0 +1,89 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package kms is an auto-generated package for the +// Cloud Key Management Service (KMS) API. + +// +// Manages keys and performs cryptographic operations in a central cloud +// service, for direct use by other cloud resources and applications. +package kms // import "cloud.google.com/go/kms/apiv1" + +import ( + "context" + "runtime" + "strings" + "unicode" + + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} + +// versionGo returns the Go runtime version. The returned string +// has no whitespace, suitable for reporting in header. 
+func versionGo() string { + const develPrefix = "devel +" + + s := runtime.Version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + notSemverRune := func(r rune) bool { + return strings.IndexRune("0123456789.", r) < 0 + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "UNKNOWN" +} + +const versionClient = "20181129" diff --git a/vendor/cloud.google.com/go/kms/apiv1/iam.go b/vendor/cloud.google.com/go/kms/apiv1/iam.go new file mode 100644 index 000000000..8c572a98e --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/iam.go @@ -0,0 +1,30 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kms + +import ( + "cloud.google.com/go/iam" + kmspb "google.golang.org/genproto/googleapis/cloud/kms/v1" +) + +// KeyRingIAM returns a handle to inspect and change permissions of a KeyRing. +func (c *KeyManagementClient) KeyRingIAM(keyRing *kmspb.KeyRing) *iam.Handle { + return iam.InternalNewHandle(c.Connection(), keyRing.Name) +} + +// CryptoKeyIAM returns a handle to inspect and change permissions of a CryptoKey. +func (c *KeyManagementClient) CryptoKeyIAM(cryptoKey *kmspb.CryptoKey) *iam.Handle { + return iam.InternalNewHandle(c.Connection(), cryptoKey.Name) +} diff --git a/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go b/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go new file mode 100644 index 000000000..a7a8b9e83 --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go @@ -0,0 +1,725 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
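The iam package above exposes a small policy-editing surface (Policy, Add, Remove, SetPolicy), and the KeyRingIAM/CryptoKeyIAM helpers bind it to KMS resources. A minimal sketch of how a consumer of this vendored code might grant a role on a key ring; the project, key ring, and service-account names are hypothetical, and the client constructor it relies on appears further down in this diff:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/iam"
	kms "cloud.google.com/go/kms/apiv1"
	kmspb "google.golang.org/genproto/googleapis/cloud/kms/v1"
)

func main() {
	ctx := context.Background()
	// NewKeyManagementClient is defined in key_management_client.go below.
	client, err := kms.NewKeyManagementClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Hypothetical key ring; the Handle edits its IAM policy.
	ring := &kmspb.KeyRing{Name: "projects/my-proj/locations/global/keyRings/my-ring"}
	handle := client.KeyRingIAM(ring)

	policy, err := handle.Policy(ctx)
	if err != nil {
		log.Fatal(err)
	}
	policy.Add("serviceAccount:kms-user@my-proj.iam.gserviceaccount.com",
		iam.RoleName("roles/cloudkms.cryptoKeyEncrypterDecrypter"))
	if err := handle.SetPolicy(ctx, policy); err != nil {
		log.Fatal(err)
	}
	fmt.Println("roles now bound on the key ring:", policy.Roles())
}
```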
+ +package kms + +import ( + "context" + "fmt" + "math" + "time" + + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + kmspb "google.golang.org/genproto/googleapis/cloud/kms/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// KeyManagementCallOptions contains the retry settings for each method of KeyManagementClient. +type KeyManagementCallOptions struct { + ListKeyRings []gax.CallOption + ListCryptoKeys []gax.CallOption + ListCryptoKeyVersions []gax.CallOption + GetKeyRing []gax.CallOption + GetCryptoKey []gax.CallOption + GetCryptoKeyVersion []gax.CallOption + CreateKeyRing []gax.CallOption + CreateCryptoKey []gax.CallOption + CreateCryptoKeyVersion []gax.CallOption + UpdateCryptoKey []gax.CallOption + UpdateCryptoKeyVersion []gax.CallOption + Encrypt []gax.CallOption + Decrypt []gax.CallOption + UpdateCryptoKeyPrimaryVersion []gax.CallOption + DestroyCryptoKeyVersion []gax.CallOption + RestoreCryptoKeyVersion []gax.CallOption + GetPublicKey []gax.CallOption + AsymmetricDecrypt []gax.CallOption + AsymmetricSign []gax.CallOption +} + +func defaultKeyManagementClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("cloudkms.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultKeyManagementCallOptions() *KeyManagementCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &KeyManagementCallOptions{ + ListKeyRings: retry[[2]string{"default", "idempotent"}], + ListCryptoKeys: retry[[2]string{"default", "idempotent"}], + ListCryptoKeyVersions: retry[[2]string{"default", "idempotent"}], + GetKeyRing: retry[[2]string{"default", "idempotent"}], + GetCryptoKey: retry[[2]string{"default", "idempotent"}], + GetCryptoKeyVersion: retry[[2]string{"default", "idempotent"}], + CreateKeyRing: retry[[2]string{"default", "non_idempotent"}], + CreateCryptoKey: retry[[2]string{"default", "non_idempotent"}], + CreateCryptoKeyVersion: retry[[2]string{"default", "non_idempotent"}], + UpdateCryptoKey: retry[[2]string{"default", "non_idempotent"}], + UpdateCryptoKeyVersion: retry[[2]string{"default", "non_idempotent"}], + Encrypt: retry[[2]string{"default", "non_idempotent"}], + Decrypt: retry[[2]string{"default", "non_idempotent"}], + UpdateCryptoKeyPrimaryVersion: retry[[2]string{"default", "non_idempotent"}], + DestroyCryptoKeyVersion: retry[[2]string{"default", "non_idempotent"}], + RestoreCryptoKeyVersion: retry[[2]string{"default", "non_idempotent"}], + GetPublicKey: retry[[2]string{"default", "idempotent"}], + AsymmetricDecrypt: retry[[2]string{"default", "non_idempotent"}], + AsymmetricSign: retry[[2]string{"default", "non_idempotent"}], + } +} + +// KeyManagementClient is a client for interacting with Cloud Key Management Service (KMS) API. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type KeyManagementClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. 
+ keyManagementClient kmspb.KeyManagementServiceClient + + // The call options for this service. + CallOptions *KeyManagementCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewKeyManagementClient creates a new key management service client. +// +// Google Cloud Key Management Service +// +// Manages cryptographic keys and operations using those keys. Implements a REST +// model with the following objects: +// +// [KeyRing][google.cloud.kms.v1.KeyRing] +// +// [CryptoKey][google.cloud.kms.v1.CryptoKey] +// +// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] +// +// If you are using manual gRPC libraries, see +// Using gRPC with Cloud KMS (at https://cloud.google.com/kms/docs/grpc). +func NewKeyManagementClient(ctx context.Context, opts ...option.ClientOption) (*KeyManagementClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultKeyManagementClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &KeyManagementClient{ + conn: conn, + CallOptions: defaultKeyManagementCallOptions(), + + keyManagementClient: kmspb.NewKeyManagementServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *KeyManagementClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *KeyManagementClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *KeyManagementClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListKeyRings lists [KeyRings][google.cloud.kms.v1.KeyRing]. +func (c *KeyManagementClient) ListKeyRings(ctx context.Context, req *kmspb.ListKeyRingsRequest, opts ...gax.CallOption) *KeyRingIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListKeyRings[0:len(c.CallOptions.ListKeyRings):len(c.CallOptions.ListKeyRings)], opts...) + it := &KeyRingIterator{} + req = proto.Clone(req).(*kmspb.ListKeyRingsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*kmspb.KeyRing, string, error) { + var resp *kmspb.ListKeyRingsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.ListKeyRings(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.KeyRings, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) 
+ return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + return it +} + +// ListCryptoKeys lists [CryptoKeys][google.cloud.kms.v1.CryptoKey]. +func (c *KeyManagementClient) ListCryptoKeys(ctx context.Context, req *kmspb.ListCryptoKeysRequest, opts ...gax.CallOption) *CryptoKeyIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListCryptoKeys[0:len(c.CallOptions.ListCryptoKeys):len(c.CallOptions.ListCryptoKeys)], opts...) + it := &CryptoKeyIterator{} + req = proto.Clone(req).(*kmspb.ListCryptoKeysRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*kmspb.CryptoKey, string, error) { + var resp *kmspb.ListCryptoKeysResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.ListCryptoKeys(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.CryptoKeys, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + return it +} + +// ListCryptoKeyVersions lists [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion]. +func (c *KeyManagementClient) ListCryptoKeyVersions(ctx context.Context, req *kmspb.ListCryptoKeyVersionsRequest, opts ...gax.CallOption) *CryptoKeyVersionIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListCryptoKeyVersions[0:len(c.CallOptions.ListCryptoKeyVersions):len(c.CallOptions.ListCryptoKeyVersions)], opts...) + it := &CryptoKeyVersionIterator{} + req = proto.Clone(req).(*kmspb.ListCryptoKeyVersionsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*kmspb.CryptoKeyVersion, string, error) { + var resp *kmspb.ListCryptoKeyVersionsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.ListCryptoKeyVersions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.CryptoKeyVersions, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + return it +} + +// GetKeyRing returns metadata for a given [KeyRing][google.cloud.kms.v1.KeyRing]. 
+func (c *KeyManagementClient) GetKeyRing(ctx context.Context, req *kmspb.GetKeyRingRequest, opts ...gax.CallOption) (*kmspb.KeyRing, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.GetKeyRing[0:len(c.CallOptions.GetKeyRing):len(c.CallOptions.GetKeyRing)], opts...) + var resp *kmspb.KeyRing + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.GetKeyRing(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetCryptoKey returns metadata for a given [CryptoKey][google.cloud.kms.v1.CryptoKey], as well as its +// [primary][google.cloud.kms.v1.CryptoKey.primary] [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. +func (c *KeyManagementClient) GetCryptoKey(ctx context.Context, req *kmspb.GetCryptoKeyRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.GetCryptoKey[0:len(c.CallOptions.GetCryptoKey):len(c.CallOptions.GetCryptoKey)], opts...) + var resp *kmspb.CryptoKey + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.GetCryptoKey(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetCryptoKeyVersion returns metadata for a given [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. +func (c *KeyManagementClient) GetCryptoKeyVersion(ctx context.Context, req *kmspb.GetCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.GetCryptoKeyVersion[0:len(c.CallOptions.GetCryptoKeyVersion):len(c.CallOptions.GetCryptoKeyVersion)], opts...) + var resp *kmspb.CryptoKeyVersion + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.GetCryptoKeyVersion(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateKeyRing create a new [KeyRing][google.cloud.kms.v1.KeyRing] in a given Project and Location. +func (c *KeyManagementClient) CreateKeyRing(ctx context.Context, req *kmspb.CreateKeyRingRequest, opts ...gax.CallOption) (*kmspb.KeyRing, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.CreateKeyRing[0:len(c.CallOptions.CreateKeyRing):len(c.CallOptions.CreateKeyRing)], opts...) + var resp *kmspb.KeyRing + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.CreateKeyRing(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateCryptoKey create a new [CryptoKey][google.cloud.kms.v1.CryptoKey] within a [KeyRing][google.cloud.kms.v1.KeyRing]. 
+// +// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] and +// [CryptoKey.version_template.algorithm][google.cloud.kms.v1.CryptoKeyVersionTemplate.algorithm] +// are required. +func (c *KeyManagementClient) CreateCryptoKey(ctx context.Context, req *kmspb.CreateCryptoKeyRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.CreateCryptoKey[0:len(c.CallOptions.CreateCryptoKey):len(c.CallOptions.CreateCryptoKey)], opts...) + var resp *kmspb.CryptoKey + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.CreateCryptoKey(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateCryptoKeyVersion create a new [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in a [CryptoKey][google.cloud.kms.v1.CryptoKey]. +// +// The server will assign the next sequential id. If unset, +// [state][google.cloud.kms.v1.CryptoKeyVersion.state] will be set to +// [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED]. +func (c *KeyManagementClient) CreateCryptoKeyVersion(ctx context.Context, req *kmspb.CreateCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.CreateCryptoKeyVersion[0:len(c.CallOptions.CreateCryptoKeyVersion):len(c.CallOptions.CreateCryptoKeyVersion)], opts...) + var resp *kmspb.CryptoKeyVersion + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.CreateCryptoKeyVersion(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateCryptoKey update a [CryptoKey][google.cloud.kms.v1.CryptoKey]. +func (c *KeyManagementClient) UpdateCryptoKey(ctx context.Context, req *kmspb.UpdateCryptoKeyRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "crypto_key.name", req.GetCryptoKey().GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.UpdateCryptoKey[0:len(c.CallOptions.UpdateCryptoKey):len(c.CallOptions.UpdateCryptoKey)], opts...) + var resp *kmspb.CryptoKey + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.UpdateCryptoKey(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateCryptoKeyVersion update a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]'s metadata. +// +// [state][google.cloud.kms.v1.CryptoKeyVersion.state] may be changed between +// [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED] and +// [DISABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DISABLED] using this +// method. See [DestroyCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion] and [RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion] to +// move between other states. 
+func (c *KeyManagementClient) UpdateCryptoKeyVersion(ctx context.Context, req *kmspb.UpdateCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "crypto_key_version.name", req.GetCryptoKeyVersion().GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.UpdateCryptoKeyVersion[0:len(c.CallOptions.UpdateCryptoKeyVersion):len(c.CallOptions.UpdateCryptoKeyVersion)], opts...) + var resp *kmspb.CryptoKeyVersion + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.UpdateCryptoKeyVersion(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// Encrypt encrypts data, so that it can only be recovered by a call to [Decrypt][google.cloud.kms.v1.KeyManagementService.Decrypt]. +// The [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must be +// [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. +func (c *KeyManagementClient) Encrypt(ctx context.Context, req *kmspb.EncryptRequest, opts ...gax.CallOption) (*kmspb.EncryptResponse, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.Encrypt[0:len(c.CallOptions.Encrypt):len(c.CallOptions.Encrypt)], opts...) + var resp *kmspb.EncryptResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.Encrypt(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// Decrypt decrypts data that was protected by [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. The [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] +// must be [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. +func (c *KeyManagementClient) Decrypt(ctx context.Context, req *kmspb.DecryptRequest, opts ...gax.CallOption) (*kmspb.DecryptResponse, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.Decrypt[0:len(c.CallOptions.Decrypt):len(c.CallOptions.Decrypt)], opts...) + var resp *kmspb.DecryptResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.Decrypt(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateCryptoKeyPrimaryVersion update the version of a [CryptoKey][google.cloud.kms.v1.CryptoKey] that will be used in [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. +// +// Returns an error if called on an asymmetric key. +func (c *KeyManagementClient) UpdateCryptoKeyPrimaryVersion(ctx context.Context, req *kmspb.UpdateCryptoKeyPrimaryVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.UpdateCryptoKeyPrimaryVersion[0:len(c.CallOptions.UpdateCryptoKeyPrimaryVersion):len(c.CallOptions.UpdateCryptoKeyPrimaryVersion)], opts...) 
+ var resp *kmspb.CryptoKey + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.UpdateCryptoKeyPrimaryVersion(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DestroyCryptoKeyVersion schedule a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] for destruction. +// +// Upon calling this method, [CryptoKeyVersion.state][google.cloud.kms.v1.CryptoKeyVersion.state] will be set to +// [DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED] +// and [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] will be set to a time 24 +// hours in the future, at which point the [state][google.cloud.kms.v1.CryptoKeyVersion.state] +// will be changed to +// [DESTROYED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROYED], and the key +// material will be irrevocably destroyed. +// +// Before the [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] is reached, +// [RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion] may be called to reverse the process. +func (c *KeyManagementClient) DestroyCryptoKeyVersion(ctx context.Context, req *kmspb.DestroyCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.DestroyCryptoKeyVersion[0:len(c.CallOptions.DestroyCryptoKeyVersion):len(c.CallOptions.DestroyCryptoKeyVersion)], opts...) + var resp *kmspb.CryptoKeyVersion + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.DestroyCryptoKeyVersion(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// RestoreCryptoKeyVersion restore a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in the +// [DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED] +// state. +// +// Upon restoration of the CryptoKeyVersion, [state][google.cloud.kms.v1.CryptoKeyVersion.state] +// will be set to [DISABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DISABLED], +// and [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] will be cleared. +func (c *KeyManagementClient) RestoreCryptoKeyVersion(ctx context.Context, req *kmspb.RestoreCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.RestoreCryptoKeyVersion[0:len(c.CallOptions.RestoreCryptoKeyVersion):len(c.CallOptions.RestoreCryptoKeyVersion)], opts...) + var resp *kmspb.CryptoKeyVersion + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.RestoreCryptoKeyVersion(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetPublicKey returns the public key for the given [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. 
The +// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must be +// [ASYMMETRIC_SIGN][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN] or +// [ASYMMETRIC_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_DECRYPT]. +func (c *KeyManagementClient) GetPublicKey(ctx context.Context, req *kmspb.GetPublicKeyRequest, opts ...gax.CallOption) (*kmspb.PublicKey, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.GetPublicKey[0:len(c.CallOptions.GetPublicKey):len(c.CallOptions.GetPublicKey)], opts...) + var resp *kmspb.PublicKey + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.GetPublicKey(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AsymmetricDecrypt decrypts data that was encrypted with a public key retrieved from +// [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey] corresponding to a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with +// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] ASYMMETRIC_DECRYPT. +func (c *KeyManagementClient) AsymmetricDecrypt(ctx context.Context, req *kmspb.AsymmetricDecryptRequest, opts ...gax.CallOption) (*kmspb.AsymmetricDecryptResponse, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.AsymmetricDecrypt[0:len(c.CallOptions.AsymmetricDecrypt):len(c.CallOptions.AsymmetricDecrypt)], opts...) + var resp *kmspb.AsymmetricDecryptResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.AsymmetricDecrypt(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AsymmetricSign signs data using a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] +// ASYMMETRIC_SIGN, producing a signature that can be verified with the public +// key retrieved from [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]. +func (c *KeyManagementClient) AsymmetricSign(ctx context.Context, req *kmspb.AsymmetricSignRequest, opts ...gax.CallOption) (*kmspb.AsymmetricSignResponse, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.AsymmetricSign[0:len(c.CallOptions.AsymmetricSign):len(c.CallOptions.AsymmetricSign)], opts...) + var resp *kmspb.AsymmetricSignResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.AsymmetricSign(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CryptoKeyIterator manages a stream of *kmspb.CryptoKey. +type CryptoKeyIterator struct { + items []*kmspb.CryptoKey + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. 
+ // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*kmspb.CryptoKey, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *CryptoKeyIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *CryptoKeyIterator) Next() (*kmspb.CryptoKey, error) { + var item *kmspb.CryptoKey + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *CryptoKeyIterator) bufLen() int { + return len(it.items) +} + +func (it *CryptoKeyIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// CryptoKeyVersionIterator manages a stream of *kmspb.CryptoKeyVersion. +type CryptoKeyVersionIterator struct { + items []*kmspb.CryptoKeyVersion + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*kmspb.CryptoKeyVersion, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *CryptoKeyVersionIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *CryptoKeyVersionIterator) Next() (*kmspb.CryptoKeyVersion, error) { + var item *kmspb.CryptoKeyVersion + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *CryptoKeyVersionIterator) bufLen() int { + return len(it.items) +} + +func (it *CryptoKeyVersionIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// KeyRingIterator manages a stream of *kmspb.KeyRing. +type KeyRingIterator struct { + items []*kmspb.KeyRing + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*kmspb.KeyRing, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *KeyRingIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *KeyRingIterator) Next() (*kmspb.KeyRing, error) { + var item *kmspb.KeyRing + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *KeyRingIterator) bufLen() int { + return len(it.items) +} + +func (it *KeyRingIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go new file mode 100644 index 000000000..442c0e099 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go @@ -0,0 +1,440 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/struct.proto + +package structpb // import "github.com/golang/protobuf/ptypes/struct" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. + NullValue_NULL_VALUE NullValue = 0 +) + +var NullValue_name = map[int32]string{ + 0: "NULL_VALUE", +} +var NullValue_value = map[string]int32{ + "NULL_VALUE": 0, +} + +func (x NullValue) String() string { + return proto.EnumName(NullValue_name, int32(x)) +} +func (NullValue) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_struct_3a5a94e0c7801b27, []int{0} +} +func (NullValue) XXX_WellKnownType() string { return "NullValue" } + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + // Unordered map of dynamically typed values. 
+ Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Struct) Reset() { *m = Struct{} } +func (m *Struct) String() string { return proto.CompactTextString(m) } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { + return fileDescriptor_struct_3a5a94e0c7801b27, []int{0} +} +func (*Struct) XXX_WellKnownType() string { return "Struct" } +func (m *Struct) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Struct.Unmarshal(m, b) +} +func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Struct.Marshal(b, m, deterministic) +} +func (dst *Struct) XXX_Merge(src proto.Message) { + xxx_messageInfo_Struct.Merge(dst, src) +} +func (m *Struct) XXX_Size() int { + return xxx_messageInfo_Struct.Size(m) +} +func (m *Struct) XXX_DiscardUnknown() { + xxx_messageInfo_Struct.DiscardUnknown(m) +} + +var xxx_messageInfo_Struct proto.InternalMessageInfo + +func (m *Struct) GetFields() map[string]*Value { + if m != nil { + return m.Fields + } + return nil +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + // The kind of value. + // + // Types that are valid to be assigned to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { + return fileDescriptor_struct_3a5a94e0c7801b27, []int{1} +} +func (*Value) XXX_WellKnownType() string { return "Value" } +func (m *Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Value.Unmarshal(m, b) +} +func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Value.Marshal(b, m, deterministic) +} +func (dst *Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Value.Merge(dst, src) +} +func (m *Value) XXX_Size() int { + return xxx_messageInfo_Value.Size(m) +} +func (m *Value) XXX_DiscardUnknown() { + xxx_messageInfo_Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Value proto.InternalMessageInfo + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,enum=google.protobuf.NullValue,oneof"` +} +type Value_NumberValue struct { + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,oneof"` +} +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,oneof"` +} +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,oneof"` +} +type Value_StructValue struct { + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,oneof"` +} 
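The Value_* wrapper structs above and below are the generated representation of Value's Kind oneof. A minimal sketch of building a Struct and reading a field back through the generated getters; the field name and value are arbitrary:

```go
package main

import (
	"fmt"

	structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	// One string-typed field, stored via the Value_StringValue oneof wrapper.
	s := &structpb.Struct{
		Fields: map[string]*structpb.Value{
			"region": {Kind: &structpb.Value_StringValue{StringValue: "us-central1"}},
		},
	}

	// Generated getters return the zero value when the oneof holds another kind.
	fmt.Println(s.Fields["region"].GetStringValue()) // "us-central1"
	fmt.Println(s.Fields["region"].GetBoolValue())   // false
}
```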
+type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} +func (*Value_NumberValue) isValue_Kind() {} +func (*Value_StringValue) isValue_Kind() {} +func (*Value_BoolValue) isValue_Kind() {} +func (*Value_StructValue) isValue_Kind() {} +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + b.EncodeVarint(2<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.NumberValue)) + case *Value_StringValue: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.StringValue) + case *Value_BoolValue: + t := uint64(0) + if x.BoolValue { + t = 1 + } + b.EncodeVarint(4<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Value_StructValue: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StructValue); err != nil { + return err + } + case *Value_ListValue: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ListValue); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Value.Kind has unexpected type %T", x) + } + return nil +} + +func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Value) + switch tag { + case 1: // kind.null_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_NullValue{NullValue(x)} + return true, err + case 2: // kind.number_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Kind = &Value_NumberValue{math.Float64frombits(x)} + return true, err + case 3: // kind.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Kind = 
&Value_StringValue{x} + return true, err + case 4: // kind.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_BoolValue{x != 0} + return true, err + case 5: // kind.struct_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Struct) + err := b.DecodeMessage(msg) + m.Kind = &Value_StructValue{msg} + return true, err + case 6: // kind.list_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ListValue) + err := b.DecodeMessage(msg) + m.Kind = &Value_ListValue{msg} + return true, err + default: + return false, nil + } +} + +func _Value_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + n += 1 // tag and wire + n += 8 + case *Value_StringValue: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *Value_BoolValue: + n += 1 // tag and wire + n += 1 + case *Value_StructValue: + s := proto.Size(x.StructValue) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_ListValue: + s := proto.Size(x.ListValue) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (m *ListValue) String() string { return proto.CompactTextString(m) } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { + return fileDescriptor_struct_3a5a94e0c7801b27, []int{2} +} +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } +func (m *ListValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListValue.Unmarshal(m, b) +} +func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListValue.Marshal(b, m, deterministic) +} +func (dst *ListValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListValue.Merge(dst, src) +} +func (m *ListValue) XXX_Size() int { + return xxx_messageInfo_ListValue.Size(m) +} +func (m *ListValue) XXX_DiscardUnknown() { + xxx_messageInfo_ListValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ListValue proto.InternalMessageInfo + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func init() { + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) +} + +func init() { + proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_struct_3a5a94e0c7801b27) +} + +var fileDescriptor_struct_3a5a94e0c7801b27 = []byte{ + // 417 bytes of a gzipped FileDescriptorProto + 
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09, + 0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94, + 0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa, + 0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff, + 0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc, + 0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15, + 0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d, + 0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce, + 0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39, + 0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab, + 0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84, + 0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48, + 0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f, + 0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59, + 0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a, + 0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64, + 0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92, + 0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25, + 0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37, + 0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6, + 0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4, + 0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda, + 0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9, + 0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53, + 0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00, + 0x00, +} diff --git a/vendor/github.com/googleapis/gax-go/LICENSE b/vendor/github.com/googleapis/gax-go/LICENSE new file mode 100644 index 000000000..6d16b6578 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/LICENSE @@ -0,0 +1,27 @@ +Copyright 2016, Google Inc. +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/googleapis/gax-go/call_option.go b/vendor/github.com/googleapis/gax-go/call_option.go new file mode 100644 index 000000000..5bd48972b --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/call_option.go @@ -0,0 +1,71 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + v2 "github.com/googleapis/gax-go/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// CallOption is an option used by Invoke to control behaviors of RPC calls. +// CallOption works by modifying relevant fields of CallSettings. +type CallOption = v2.CallOption + +// Retryer is used by Invoke to determine retry behavior. +type Retryer = v2.Retryer + +// WithRetry sets CallSettings.Retry to fn. +func WithRetry(fn func() Retryer) CallOption { + return v2.WithRetry(fn) +} + +// OnCodes returns a Retryer that retries if and only if +// the previous attempt returns a GRPC error whose error code is stored in cc. +// Pause times between retries are specified by bo. +// +// bo is only used for its parameters; each Retryer has its own copy. +func OnCodes(cc []codes.Code, bo Backoff) Retryer { + return v2.OnCodes(cc, bo) +} + +// Backoff implements exponential backoff. 
+// The wait time between retries is a random value between 0 and the "retry envelope". +// The envelope starts at Initial and increases by the factor of Multiplier every retry, +// but is capped at Max. +type Backoff = v2.Backoff + +// WithGRPCOptions allows passing gRPC call options during client creation. +func WithGRPCOptions(opt ...grpc.CallOption) CallOption { + return v2.WithGRPCOptions(opt...) +} + +// CallSettings allow fine-grained control over how calls are made. +type CallSettings = v2.CallSettings diff --git a/vendor/github.com/googleapis/gax-go/gax.go b/vendor/github.com/googleapis/gax-go/gax.go new file mode 100644 index 000000000..006739804 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/gax.go @@ -0,0 +1,39 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package gax contains a set of modules which aid the development of APIs +// for clients and servers based on gRPC and Google API conventions. +// +// Application code will rarely need to use this library directly. +// However, code generated automatically from API definition files can use it +// to simplify code generation and to provide more convenient and idiomatic API surfaces. +package gax + +// Version specifies the gax version. +const Version = "1.0.1" diff --git a/vendor/github.com/googleapis/gax-go/header.go b/vendor/github.com/googleapis/gax-go/header.go new file mode 100644 index 000000000..e1a0af1ba --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/header.go @@ -0,0 +1,40 @@ +// Copyright 2018, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import v2 "github.com/googleapis/gax-go/v2" + +// XGoogHeader is for use by the Google Cloud Libraries only. +// +// XGoogHeader formats key-value pairs. +// The resulting string is suitable for x-goog-api-client header. +func XGoogHeader(keyval ...string) string { + return v2.XGoogHeader(keyval...) +} diff --git a/vendor/github.com/googleapis/gax-go/invoke.go b/vendor/github.com/googleapis/gax-go/invoke.go new file mode 100644 index 000000000..6422d3f73 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/invoke.go @@ -0,0 +1,52 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "context" + "time" + + v2 "github.com/googleapis/gax-go/v2" +) + +// APICall is a user defined call stub. 
+type APICall = v2.APICall + +// Invoke calls the given APICall, +// performing retries as specified by opts, if any. +func Invoke(ctx context.Context, call APICall, opts ...CallOption) error { + return v2.Invoke(ctx, call, opts...) +} + +// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing. +// If interrupted, Sleep returns ctx.Err(). +func Sleep(ctx context.Context, d time.Duration) error { + return v2.Sleep(ctx, d) +} diff --git a/vendor/github.com/googleapis/gax-go/v2/call_option.go b/vendor/github.com/googleapis/gax-go/v2/call_option.go new file mode 100644 index 000000000..b1d53dd19 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/call_option.go @@ -0,0 +1,161 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "math/rand" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// CallOption is an option used by Invoke to control behaviors of RPC calls. +// CallOption works by modifying relevant fields of CallSettings. +type CallOption interface { + // Resolve applies the option by modifying cs. + Resolve(cs *CallSettings) +} + +// Retryer is used by Invoke to determine retry behavior. +type Retryer interface { + // Retry reports whether a request should be retriedand how long to pause before retrying + // if the previous attempt returned with err. Invoke never calls Retry with nil error. + Retry(err error) (pause time.Duration, shouldRetry bool) +} + +type retryerOption func() Retryer + +func (o retryerOption) Resolve(s *CallSettings) { + s.Retry = o +} + +// WithRetry sets CallSettings.Retry to fn. +func WithRetry(fn func() Retryer) CallOption { + return retryerOption(fn) +} + +// OnCodes returns a Retryer that retries if and only if +// the previous attempt returns a GRPC error whose error code is stored in cc. +// Pause times between retries are specified by bo. +// +// bo is only used for its parameters; each Retryer has its own copy. 
+func OnCodes(cc []codes.Code, bo Backoff) Retryer { + return &boRetryer{ + backoff: bo, + codes: append([]codes.Code(nil), cc...), + } +} + +type boRetryer struct { + backoff Backoff + codes []codes.Code +} + +func (r *boRetryer) Retry(err error) (time.Duration, bool) { + st, ok := status.FromError(err) + if !ok { + return 0, false + } + c := st.Code() + for _, rc := range r.codes { + if c == rc { + return r.backoff.Pause(), true + } + } + return 0, false +} + +// Backoff implements exponential backoff. +// The wait time between retries is a random value between 0 and the "retry envelope". +// The envelope starts at Initial and increases by the factor of Multiplier every retry, +// but is capped at Max. +type Backoff struct { + // Initial is the initial value of the retry envelope, defaults to 1 second. + Initial time.Duration + + // Max is the maximum value of the retry envelope, defaults to 30 seconds. + Max time.Duration + + // Multiplier is the factor by which the retry envelope increases. + // It should be greater than 1 and defaults to 2. + Multiplier float64 + + // cur is the current retry envelope + cur time.Duration +} + +// Pause returns the next time.Duration that the caller should use to backoff. +func (bo *Backoff) Pause() time.Duration { + if bo.Initial == 0 { + bo.Initial = time.Second + } + if bo.cur == 0 { + bo.cur = bo.Initial + } + if bo.Max == 0 { + bo.Max = 30 * time.Second + } + if bo.Multiplier < 1 { + bo.Multiplier = 2 + } + // Select a duration between 1ns and the current max. It might seem + // counterintuitive to have so much jitter, but + // https://www.awsarchitectureblog.com/2015/03/backoff.html argues that + // that is the best strategy. + d := time.Duration(1 + rand.Int63n(int64(bo.cur))) + bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier) + if bo.cur > bo.Max { + bo.cur = bo.Max + } + return d +} + +type grpcOpt []grpc.CallOption + +func (o grpcOpt) Resolve(s *CallSettings) { + s.GRPC = o +} + +// WithGRPCOptions allows passing gRPC call options during client creation. +func WithGRPCOptions(opt ...grpc.CallOption) CallOption { + return grpcOpt(append([]grpc.CallOption(nil), opt...)) +} + +// CallSettings allow fine-grained control over how calls are made. +type CallSettings struct { + // Retry returns a Retryer to be used to control retry logic of a method call. + // If Retry is nil or the returned Retryer is nil, the call will not be retried. + Retry func() Retryer + + // CallOptions to be forwarded to GRPC. + GRPC []grpc.CallOption +} diff --git a/vendor/github.com/googleapis/gax-go/v2/gax.go b/vendor/github.com/googleapis/gax-go/v2/gax.go new file mode 100644 index 000000000..8040dcb0c --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/gax.go @@ -0,0 +1,39 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package gax contains a set of modules which aid the development of APIs +// for clients and servers based on gRPC and Google API conventions. +// +// Application code will rarely need to use this library directly. +// However, code generated automatically from API definition files can use it +// to simplify code generation and to provide more convenient and idiomatic API surfaces. +package gax + +// Version specifies the gax-go version being used. +const Version = "2.0.3" diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go new file mode 100644 index 000000000..139371a0b --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/header.go @@ -0,0 +1,53 @@ +// Copyright 2018, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import "bytes" + +// XGoogHeader is for use by the Google Cloud Libraries only. +// +// XGoogHeader formats key-value pairs. +// The resulting string is suitable for x-goog-api-client header. 
+func XGoogHeader(keyval ...string) string { + if len(keyval) == 0 { + return "" + } + if len(keyval)%2 != 0 { + panic("gax.Header: odd argument count") + } + var buf bytes.Buffer + for i := 0; i < len(keyval); i += 2 { + buf.WriteByte(' ') + buf.WriteString(keyval[i]) + buf.WriteByte('/') + buf.WriteString(keyval[i+1]) + } + return buf.String()[1:] +} diff --git a/vendor/github.com/googleapis/gax-go/v2/invoke.go b/vendor/github.com/googleapis/gax-go/v2/invoke.go new file mode 100644 index 000000000..fe31dd004 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/invoke.go @@ -0,0 +1,99 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "context" + "strings" + "time" +) + +// APICall is a user defined call stub. +type APICall func(context.Context, CallSettings) error + +// Invoke calls the given APICall, +// performing retries as specified by opts, if any. +func Invoke(ctx context.Context, call APICall, opts ...CallOption) error { + var settings CallSettings + for _, opt := range opts { + opt.Resolve(&settings) + } + return invoke(ctx, call, settings, Sleep) +} + +// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing. +// If interrupted, Sleep returns ctx.Err(). +func Sleep(ctx context.Context, d time.Duration) error { + t := time.NewTimer(d) + select { + case <-ctx.Done(): + t.Stop() + return ctx.Err() + case <-t.C: + return nil + } +} + +type sleeper func(ctx context.Context, d time.Duration) error + +// invoke implements Invoke, taking an additional sleeper argument for testing. +func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error { + var retryer Retryer + for { + err := call(ctx, settings) + if err == nil { + return nil + } + if settings.Retry == nil { + return err + } + // Never retry permanent certificate errors. (e.x. if ca-certificates + // are not installed). 
We should only make very few, targeted + // exceptions: many (other) status=Unavailable should be retried, such + // as if there's a network hiccup, or the internet goes out for a + // minute. This is also why here we are doing string parsing instead of + // simply making Unavailable a non-retried code elsewhere. + if strings.Contains(err.Error(), "x509: certificate signed by unknown authority") { + return err + } + if retryer == nil { + if r := settings.Retry(); r != nil { + retryer = r + } else { + return err + } + } + if d, ok := retryer.Retry(err); !ok { + return err + } else if err = sp(ctx, d); err != nil { + return err + } + } +} diff --git a/vendor/go.opencensus.io/AUTHORS b/vendor/go.opencensus.io/AUTHORS new file mode 100644 index 000000000..e491a9e7f --- /dev/null +++ b/vendor/go.opencensus.io/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/go.opencensus.io/LICENSE b/vendor/go.opencensus.io/LICENSE new file mode 100644 index 000000000..7a4a3ea24 --- /dev/null +++ b/vendor/go.opencensus.io/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/go.opencensus.io/internal/internal.go b/vendor/go.opencensus.io/internal/internal.go new file mode 100644 index 000000000..fce1f02dc --- /dev/null +++ b/vendor/go.opencensus.io/internal/internal.go @@ -0,0 +1,32 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import "time" + +// UserAgent is the user agent to be added to the outgoing +// requests from the exporters. 
+const UserAgent = "opencensus-go [0.8.0]" + +// MonotonicEndTime returns the end time at present +// but offset from start, monotonically. +// +// The monotonic clock is used in subtractions hence +// the duration since start added back to start gives +// end as a monotonic time. +// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks +func MonotonicEndTime(start time.Time) time.Time { + return start.Add(time.Now().Sub(start)) +} diff --git a/vendor/go.opencensus.io/internal/sanitize.go b/vendor/go.opencensus.io/internal/sanitize.go new file mode 100644 index 000000000..de8ccf236 --- /dev/null +++ b/vendor/go.opencensus.io/internal/sanitize.go @@ -0,0 +1,50 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "strings" + "unicode" +) + +const labelKeySizeLimit = 100 + +// Sanitize returns a string that is trunacated to 100 characters if it's too +// long, and replaces non-alphanumeric characters to underscores. +func Sanitize(s string) string { + if len(s) == 0 { + return s + } + if len(s) > labelKeySizeLimit { + s = s[:labelKeySizeLimit] + } + s = strings.Map(sanitizeRune, s) + if unicode.IsDigit(rune(s[0])) { + s = "key_" + s + } + if s[0] == '_' { + s = "key" + s + } + return s +} + +// converts anything that is not a letter or digit to an underscore +func sanitizeRune(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) { + return r + } + // Everything else turns into an underscore + return '_' +} diff --git a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go new file mode 100644 index 000000000..d9f23abee --- /dev/null +++ b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go @@ -0,0 +1,72 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package tagencoding contains the tag encoding +// used interally by the stats collector. 
+package tagencoding + +type Values struct { + Buffer []byte + WriteIndex int + ReadIndex int +} + +func (vb *Values) growIfRequired(expected int) { + if len(vb.Buffer)-vb.WriteIndex < expected { + tmp := make([]byte, 2*(len(vb.Buffer)+1)+expected) + copy(tmp, vb.Buffer) + vb.Buffer = tmp + } +} + +func (vb *Values) WriteValue(v []byte) { + length := len(v) & 0xff + vb.growIfRequired(1 + length) + + // writing length of v + vb.Buffer[vb.WriteIndex] = byte(length) + vb.WriteIndex++ + + if length == 0 { + // No value was encoded for this key + return + } + + // writing v + copy(vb.Buffer[vb.WriteIndex:], v[:length]) + vb.WriteIndex += length +} + +// ReadValue is the helper method to read the values when decoding valuesBytes to a map[Key][]byte. +func (vb *Values) ReadValue() []byte { + // read length of v + length := int(vb.Buffer[vb.ReadIndex]) + vb.ReadIndex++ + if length == 0 { + // No value was encoded for this key + return nil + } + + // read value of v + v := make([]byte, length) + endIdx := vb.ReadIndex + length + copy(v, vb.Buffer[vb.ReadIndex:endIdx]) + vb.ReadIndex = endIdx + return v +} + +func (vb *Values) Bytes() []byte { + return vb.Buffer[:vb.WriteIndex] +} diff --git a/vendor/go.opencensus.io/internal/traceinternals.go b/vendor/go.opencensus.io/internal/traceinternals.go new file mode 100644 index 000000000..553ca68dc --- /dev/null +++ b/vendor/go.opencensus.io/internal/traceinternals.go @@ -0,0 +1,52 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "time" +) + +// Trace allows internal access to some trace functionality. +// TODO(#412): remove this +var Trace interface{} + +var LocalSpanStoreEnabled bool + +// BucketConfiguration stores the number of samples to store for span buckets +// for successful and failed spans for a particular span name. +type BucketConfiguration struct { + Name string + MaxRequestsSucceeded int + MaxRequestsErrors int +} + +// PerMethodSummary is a summary of the spans stored for a single span name. +type PerMethodSummary struct { + Active int + LatencyBuckets []LatencyBucketSummary + ErrorBuckets []ErrorBucketSummary +} + +// LatencyBucketSummary is a summary of a latency bucket. +type LatencyBucketSummary struct { + MinLatency, MaxLatency time.Duration + Size int +} + +// ErrorBucketSummary is a summary of an error bucket. +type ErrorBucketSummary struct { + ErrorCode int32 + Size int +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client.go b/vendor/go.opencensus.io/plugin/ocgrpc/client.go new file mode 100644 index 000000000..f4a2d4d27 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client.go @@ -0,0 +1,55 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocgrpc + +import ( + "go.opencensus.io/trace" + "golang.org/x/net/context" + + "google.golang.org/grpc/stats" +) + +// ClientHandler implements a gRPC stats.Handler for recording OpenCensus stats and +// traces. Use with gRPC clients only. +type ClientHandler struct { + // StartOptions allows configuring the StartOptions used to create new spans. + // + // StartOptions.SpanKind will always be set to trace.SpanKindClient + // for spans started by this handler. + StartOptions trace.StartOptions +} + +func (c *ClientHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { + // no-op +} + +// TagConn exists to satisfy gRPC stats.Handler. +func (c *ClientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { + // no-op + return ctx +} + +// HandleRPC implements per-RPC tracing and stats instrumentation. +func (c *ClientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { + traceHandleRPC(ctx, rs) + c.statsHandleRPC(ctx, rs) +} + +// TagRPC implements per-RPC context management. +func (c *ClientHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + ctx = c.traceTagRPC(ctx, rti) + ctx = c.statsTagRPC(ctx, rti) + return ctx +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go new file mode 100644 index 000000000..0d421db2c --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go @@ -0,0 +1,103 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package ocgrpc + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +// The following variables are measures are recorded by ClientHandler: +var ( + ClientErrorCount = stats.Int64("grpc.io/client/error_count", "RPC Errors", stats.UnitNone) + ClientRequestBytes = stats.Int64("grpc.io/client/request_bytes", "Request bytes", stats.UnitBytes) + ClientResponseBytes = stats.Int64("grpc.io/client/response_bytes", "Response bytes", stats.UnitBytes) + ClientStartedCount = stats.Int64("grpc.io/client/started_count", "Number of client RPCs (streams) started", stats.UnitNone) + ClientFinishedCount = stats.Int64("grpc.io/client/finished_count", "Number of client RPCs (streams) finished", stats.UnitNone) + ClientRequestCount = stats.Int64("grpc.io/client/request_count", "Number of client RPC request messages", stats.UnitNone) + ClientResponseCount = stats.Int64("grpc.io/client/response_count", "Number of client RPC response messages", stats.UnitNone) + ClientRoundTripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "RPC roundtrip latency in msecs", stats.UnitMilliseconds) +) + +// Predefined views may be subscribed to collect data for the above measures. +// As always, you may also define your own custom views over measures collected by this +// package. These are declared as a convenience only; none are subscribed by +// default. +var ( + ClientErrorCountView = &view.View{ + Name: "grpc.io/client/error_count", + Description: "RPC Errors", + TagKeys: []tag.Key{KeyStatus, KeyMethod}, + Measure: ClientErrorCount, + Aggregation: view.Count(), + } + + ClientRoundTripLatencyView = &view.View{ + Name: "grpc.io/client/roundtrip_latency", + Description: "Latency in msecs", + TagKeys: []tag.Key{KeyMethod}, + Measure: ClientRoundTripLatency, + Aggregation: DefaultMillisecondsDistribution, + } + + ClientRequestBytesView = &view.View{ + Name: "grpc.io/client/request_bytes", + Description: "Request bytes", + TagKeys: []tag.Key{KeyMethod}, + Measure: ClientRequestBytes, + Aggregation: DefaultBytesDistribution, + } + + ClientResponseBytesView = &view.View{ + Name: "grpc.io/client/response_bytes", + Description: "Response bytes", + TagKeys: []tag.Key{KeyMethod}, + Measure: ClientResponseBytes, + Aggregation: DefaultBytesDistribution, + } + + ClientRequestCountView = &view.View{ + Name: "grpc.io/client/request_count", + Description: "Count of request messages per client RPC", + TagKeys: []tag.Key{KeyMethod}, + Measure: ClientRequestCount, + Aggregation: DefaultMessageCountDistribution, + } + + ClientResponseCountView = &view.View{ + Name: "grpc.io/client/response_count", + Description: "Count of response messages per client RPC", + TagKeys: []tag.Key{KeyMethod}, + Measure: ClientResponseCount, + Aggregation: DefaultMessageCountDistribution, + } +) + +// DefaultClientViews are the default client views provided by this package. +var DefaultClientViews = []*view.View{ + ClientErrorCountView, + ClientRoundTripLatencyView, + ClientRequestBytesView, + ClientResponseBytesView, + ClientRequestCountView, + ClientResponseCountView, +} + +// TODO(jbd): Add roundtrip_latency, uncompressed_request_bytes, uncompressed_response_bytes, request_count, response_count. +// TODO(acetechnologist): This is temporary and will need to be replaced by a +// mechanism to load these defaults from a common repository/config shared by +// all supported languages. Likely a serialized protobuf of these defaults. 
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go new file mode 100644 index 000000000..22d076dc7 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go @@ -0,0 +1,128 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ocgrpc + +import ( + "sync/atomic" + "time" + + ocstats "go.opencensus.io/stats" + "go.opencensus.io/tag" + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// statsTagRPC gets the tag.Map populated by the application code, serializes +// its tags into the GRPC metadata in order to be sent to the server. +func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + startTime := time.Now() + if info == nil { + if grpclog.V(2) { + grpclog.Infof("clientHandler.TagRPC called with nil info.", info.FullMethodName) + } + return ctx + } + + d := &rpcData{ + startTime: startTime, + method: info.FullMethodName, + } + ts := tag.FromContext(ctx) + if ts != nil { + encoded := tag.Encode(ts) + ctx = stats.SetTags(ctx, encoded) + } + + // TODO(acetechnologist): should we be recording this later? What is the + // point of updating d.reqLen & d.reqCount if we update now? + record(ctx, d, "", ClientStartedCount.M(1)) + return context.WithValue(ctx, grpcClientRPCKey, d) +} + +// statsHandleRPC processes the RPC events. 
+func (h *ClientHandler) statsHandleRPC(ctx context.Context, s stats.RPCStats) { + switch st := s.(type) { + case *stats.Begin, *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer: + // do nothing for client + case *stats.OutPayload: + h.handleRPCOutPayload(ctx, st) + case *stats.InPayload: + h.handleRPCInPayload(ctx, st) + case *stats.End: + h.handleRPCEnd(ctx, st) + default: + grpclog.Infof("unexpected stats: %T", st) + } +} + +func (h *ClientHandler) handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) { + d, ok := ctx.Value(grpcClientRPCKey).(*rpcData) + if !ok { + if grpclog.V(2) { + grpclog.Infoln("clientHandler.handleRPCOutPayload failed to retrieve *rpcData from context") + } + return + } + + record(ctx, d, "", ClientRequestBytes.M(int64(s.Length))) + atomic.AddInt64(&d.reqCount, 1) +} + +func (h *ClientHandler) handleRPCInPayload(ctx context.Context, s *stats.InPayload) { + d, ok := ctx.Value(grpcClientRPCKey).(*rpcData) + if !ok { + if grpclog.V(2) { + grpclog.Infoln("failed to retrieve *rpcData from context") + } + return + } + + record(ctx, d, "", ClientResponseBytes.M(int64(s.Length))) + atomic.AddInt64(&d.respCount, 1) +} + +func (h *ClientHandler) handleRPCEnd(ctx context.Context, s *stats.End) { + d, ok := ctx.Value(grpcClientRPCKey).(*rpcData) + if !ok { + if grpclog.V(2) { + grpclog.Infoln("failed to retrieve *rpcData from context") + } + return + } + + elapsedTime := time.Since(d.startTime) + reqCount := atomic.LoadInt64(&d.reqCount) + respCount := atomic.LoadInt64(&d.respCount) + + m := []ocstats.Measurement{ + ClientRequestCount.M(reqCount), + ClientResponseCount.M(respCount), + ClientFinishedCount.M(1), + ClientRoundTripLatency.M(float64(elapsedTime) / float64(time.Millisecond)), + } + + var st string + if s.Error != nil { + s, ok := status.FromError(s.Error) + if ok { + st = s.Code().String() + } + m = append(m, ClientErrorCount.M(1)) + } + record(ctx, d, st, m...) +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/doc.go b/vendor/go.opencensus.io/plugin/ocgrpc/doc.go new file mode 100644 index 000000000..1370323fb --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/doc.go @@ -0,0 +1,19 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ocgrpc contains OpenCensus stats and trace +// integrations for gRPC. +// +// Use ServerHandler for servers and ClientHandler for clients. +package ocgrpc // import "go.opencensus.io/plugin/ocgrpc" diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server.go b/vendor/go.opencensus.io/plugin/ocgrpc/server.go new file mode 100644 index 000000000..f5da89b90 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server.go @@ -0,0 +1,80 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocgrpc + +import ( + "go.opencensus.io/trace" + "golang.org/x/net/context" + + "google.golang.org/grpc/stats" +) + +// ServerHandler implements gRPC stats.Handler recording OpenCensus stats and +// traces. Use with gRPC servers. +// +// When installed (see Example), tracing metadata is read from inbound RPCs +// by default. If no tracing metadata is present, or if the tracing metadata is +// present but the SpanContext isn't sampled, then a new trace may be started +// (as determined by Sampler). +type ServerHandler struct { + // IsPublicEndpoint may be set to true to always start a new trace around + // each RPC. Any SpanContext in the RPC metadata will be added as a linked + // span instead of making it the parent of the span created around the + // server RPC. + // + // Be aware that if you leave this false (the default) on a public-facing + // server, callers will be able to send tracing metadata in gRPC headers + // and trigger traces in your backend. + IsPublicEndpoint bool + + // StartOptions to use for to spans started around RPCs handled by this server. + // + // These will apply even if there is tracing metadata already + // present on the inbound RPC but the SpanContext is not sampled. This + // ensures that each service has some opportunity to be traced. If you would + // like to not add any additional traces for this gRPC service, set: + // + // StartOptions.Sampler = trace.ProbabilitySampler(0.0) + // + // StartOptions.SpanKind will always be set to trace.SpanKindServer + // for spans started by this handler. + StartOptions trace.StartOptions +} + +var _ stats.Handler = (*ServerHandler)(nil) + +// HandleConn exists to satisfy gRPC stats.Handler. +func (s *ServerHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { + // no-op +} + +// TagConn exists to satisfy gRPC stats.Handler. +func (s *ServerHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { + // no-op + return ctx +} + +// HandleRPC implements per-RPC tracing and stats instrumentation. +func (s *ServerHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { + traceHandleRPC(ctx, rs) + s.statsHandleRPC(ctx, rs) +} + +// TagRPC implements per-RPC context management. +func (s *ServerHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + ctx = s.traceTagRPC(ctx, rti) + ctx = s.statsTagRPC(ctx, rti) + return ctx +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go new file mode 100644 index 000000000..3b1f43b06 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go @@ -0,0 +1,104 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ocgrpc + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +// The following variables are measures are recorded by ServerHandler: +var ( + ServerErrorCount = stats.Int64("grpc.io/server/error_count", "RPC Errors", stats.UnitNone) + ServerServerElapsedTime = stats.Float64("grpc.io/server/server_elapsed_time", "Server elapsed time in msecs", stats.UnitMilliseconds) + ServerRequestBytes = stats.Int64("grpc.io/server/request_bytes", "Request bytes", stats.UnitBytes) + ServerResponseBytes = stats.Int64("grpc.io/server/response_bytes", "Response bytes", stats.UnitBytes) + ServerStartedCount = stats.Int64("grpc.io/server/started_count", "Number of server RPCs (streams) started", stats.UnitNone) + ServerFinishedCount = stats.Int64("grpc.io/server/finished_count", "Number of server RPCs (streams) finished", stats.UnitNone) + ServerRequestCount = stats.Int64("grpc.io/server/request_count", "Number of server RPC request messages", stats.UnitNone) + ServerResponseCount = stats.Int64("grpc.io/server/response_count", "Number of server RPC response messages", stats.UnitNone) +) + +// TODO(acetechnologist): This is temporary and will need to be replaced by a +// mechanism to load these defaults from a common repository/config shared by +// all supported languages. Likely a serialized protobuf of these defaults. + +// Predefined views may be subscribed to collect data for the above measures. +// As always, you may also define your own custom views over measures collected by this +// package. These are declared as a convenience only; none are subscribed by +// default. 
+var ( + ServerErrorCountView = &view.View{ + Name: "grpc.io/server/error_count", + Description: "RPC Errors", + TagKeys: []tag.Key{KeyMethod, KeyStatus}, + Measure: ServerErrorCount, + Aggregation: view.Count(), + } + + ServerServerElapsedTimeView = &view.View{ + Name: "grpc.io/server/server_elapsed_time", + Description: "Server elapsed time in msecs", + TagKeys: []tag.Key{KeyMethod}, + Measure: ServerServerElapsedTime, + Aggregation: DefaultMillisecondsDistribution, + } + + ServerRequestBytesView = &view.View{ + Name: "grpc.io/server/request_bytes", + Description: "Request bytes", + TagKeys: []tag.Key{KeyMethod}, + Measure: ServerRequestBytes, + Aggregation: DefaultBytesDistribution, + } + + ServerResponseBytesView = &view.View{ + Name: "grpc.io/server/response_bytes", + Description: "Response bytes", + TagKeys: []tag.Key{KeyMethod}, + Measure: ServerResponseBytes, + Aggregation: DefaultBytesDistribution, + } + + ServerRequestCountView = &view.View{ + Name: "grpc.io/server/request_count", + Description: "Count of request messages per server RPC", + TagKeys: []tag.Key{KeyMethod}, + Measure: ServerRequestCount, + Aggregation: DefaultMessageCountDistribution, + } + + ServerResponseCountView = &view.View{ + Name: "grpc.io/server/response_count", + Description: "Count of response messages per server RPC", + TagKeys: []tag.Key{KeyMethod}, + Measure: ServerResponseCount, + Aggregation: DefaultMessageCountDistribution, + } +) + +// DefaultServerViews are the default server views provided by this package. +var DefaultServerViews = []*view.View{ + ServerErrorCountView, + ServerServerElapsedTimeView, + ServerRequestBytesView, + ServerResponseBytesView, + ServerRequestCountView, + ServerResponseCountView, +} + +// TODO(jbd): Add roundtrip_latency, uncompressed_request_bytes, uncompressed_response_bytes, request_count, response_count. diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go new file mode 100644 index 000000000..c146da5dd --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go @@ -0,0 +1,137 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ocgrpc + +import ( + "fmt" + "sync/atomic" + "time" + + "golang.org/x/net/context" + + ocstats "go.opencensus.io/stats" + "go.opencensus.io/tag" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// statsTagRPC gets the metadata from gRPC context, extracts the encoded tags from +// it and creates a new tag.Map and puts them into the returned context. 
+func (h *ServerHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + startTime := time.Now() + if info == nil { + if grpclog.V(2) { + grpclog.Infof("serverHandler.TagRPC called with nil info.", info.FullMethodName) + } + return ctx + } + d := &rpcData{ + startTime: startTime, + method: info.FullMethodName, + } + ctx, _ = h.createTags(ctx) + record(ctx, d, "", ServerStartedCount.M(1)) + return context.WithValue(ctx, grpcServerRPCKey, d) +} + +// statsHandleRPC processes the RPC events. +func (h *ServerHandler) statsHandleRPC(ctx context.Context, s stats.RPCStats) { + switch st := s.(type) { + case *stats.Begin, *stats.InHeader, *stats.InTrailer, *stats.OutHeader, *stats.OutTrailer: + // Do nothing for server + case *stats.InPayload: + h.handleRPCInPayload(ctx, st) + case *stats.OutPayload: + // For stream it can be called multiple times per RPC. + h.handleRPCOutPayload(ctx, st) + case *stats.End: + h.handleRPCEnd(ctx, st) + default: + grpclog.Infof("unexpected stats: %T", st) + } +} + +func (h *ServerHandler) handleRPCInPayload(ctx context.Context, s *stats.InPayload) { + d, ok := ctx.Value(grpcServerRPCKey).(*rpcData) + if !ok { + if grpclog.V(2) { + grpclog.Infoln("handleRPCInPayload: failed to retrieve *rpcData from context") + } + return + } + + record(ctx, d, "", ServerRequestBytes.M(int64(s.Length))) + atomic.AddInt64(&d.reqCount, 1) +} + +func (h *ServerHandler) handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) { + d, ok := ctx.Value(grpcServerRPCKey).(*rpcData) + if !ok { + if grpclog.V(2) { + grpclog.Infoln("handleRPCOutPayload: failed to retrieve *rpcData from context") + } + return + } + + record(ctx, d, "", ServerResponseBytes.M(int64(s.Length))) + atomic.AddInt64(&d.respCount, 1) +} + +func (h *ServerHandler) handleRPCEnd(ctx context.Context, s *stats.End) { + d, ok := ctx.Value(grpcServerRPCKey).(*rpcData) + if !ok { + if grpclog.V(2) { + grpclog.Infoln("serverHandler.handleRPCEnd failed to retrieve *rpcData from context") + } + return + } + + elapsedTime := time.Since(d.startTime) + reqCount := atomic.LoadInt64(&d.reqCount) + respCount := atomic.LoadInt64(&d.respCount) + + m := []ocstats.Measurement{ + ServerRequestCount.M(reqCount), + ServerResponseCount.M(respCount), + ServerFinishedCount.M(1), + ServerServerElapsedTime.M(float64(elapsedTime) / float64(time.Millisecond)), + } + + var st string + if s.Error != nil { + s, ok := status.FromError(s.Error) + if ok { + st = s.Code().String() + } + m = append(m, ServerErrorCount.M(1)) + } + record(ctx, d, st, m...) +} + +// createTags creates a new tag map containing the tags extracted from the +// gRPC metadata. +func (h *ServerHandler) createTags(ctx context.Context) (context.Context, error) { + buf := stats.Tags(ctx) + if buf == nil { + return ctx, nil + } + propagated, err := tag.Decode(buf) + if err != nil { + return nil, fmt.Errorf("serverHandler.createTags failed to decode: %v", err) + } + return tag.NewContext(ctx, propagated), nil +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go new file mode 100644 index 000000000..52b41ac28 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go @@ -0,0 +1,77 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ocgrpc + +import ( + "strings" + "time" + + ocstats "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + "golang.org/x/net/context" +) + +type grpcInstrumentationKey string + +// rpcData holds the instrumentation RPC data that is needed between the start +// and end of an call. It holds the info that this package needs to keep track +// of between the various GRPC events. +type rpcData struct { + // reqCount and respCount has to be the first words + // in order to be 64-aligned on 32-bit architectures. + reqCount, respCount int64 // access atomically + + // startTime represents the time at which TagRPC was invoked at the + // beginning of an RPC. It is an appoximation of the time when the + // application code invoked GRPC code. + startTime time.Time + method string +} + +// The following variables define the default hard-coded auxiliary data used by +// both the default GRPC client and GRPC server metrics. +var ( + DefaultBytesDistribution = view.Distribution(0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) + DefaultMillisecondsDistribution = view.Distribution(0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) + DefaultMessageCountDistribution = view.Distribution(0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536) +) + +var ( + KeyMethod, _ = tag.NewKey("method") // gRPC service and method name + KeyStatus, _ = tag.NewKey("canonical_status") // Canonical status code +) + +var ( + grpcServerConnKey = grpcInstrumentationKey("server-conn") + grpcServerRPCKey = grpcInstrumentationKey("server-rpc") + grpcClientRPCKey = grpcInstrumentationKey("client-rpc") +) + +func methodName(fullname string) string { + return strings.TrimLeft(fullname, "/") +} + +func record(ctx context.Context, data *rpcData, status string, m ...ocstats.Measurement) { + mods := []tag.Mutator{ + tag.Upsert(KeyMethod, methodName(data.method)), + } + if status != "" { + mods = append(mods, tag.Upsert(KeyStatus, status)) + } + ctx, _ = tag.New(ctx, mods...) + ocstats.Record(ctx, m...) +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go new file mode 100644 index 000000000..6d6836f02 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go @@ -0,0 +1,109 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
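Because record tags every measurement with KeyMethod (and KeyStatus for errors), custom views can break the server measures down per RPC method. A hedged sketch reusing the exported measures and distributions above; the view name is an example, not something defined by this package, and view.Register is assumed to be available.

package main

import (
	"log"

	"go.opencensus.io/plugin/ocgrpc"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)

// serverElapsedTimeByMethod is a hypothetical custom view; its name is
// illustrative and not part of the ocgrpc package.
var serverElapsedTimeByMethod = &view.View{
	Name:        "example.com/grpc/server/elapsed_time_by_method",
	Description: "Server elapsed time in msecs, broken down by RPC method",
	TagKeys:     []tag.Key{ocgrpc.KeyMethod},
	Measure:     ocgrpc.ServerServerElapsedTime,
	Aggregation: ocgrpc.DefaultMillisecondsDistribution,
}

func main() {
	if err := view.Register(serverElapsedTimeByMethod); err != nil {
		log.Fatalf("cannot register view: %v", err)
	}
}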
+ +package ocgrpc + +import ( + "strings" + + "google.golang.org/grpc/codes" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +const traceContextKey = "grpc-trace-bin" + +// TagRPC creates a new trace span for the client side of the RPC. +// +// It returns ctx with the new trace span added and a serialization of the +// SpanContext added to the outgoing gRPC metadata. +func (c *ClientHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + name := strings.TrimPrefix(rti.FullMethodName, "/") + name = strings.Replace(name, "/", ".", -1) + span := trace.NewSpan(name, trace.FromContext(ctx), trace.StartOptions{ + Sampler: c.StartOptions.Sampler, + SpanKind: trace.SpanKindClient, + }) // span is ended by traceHandleRPC + ctx = trace.WithSpan(ctx, span) + traceContextBinary := propagation.Binary(span.SpanContext()) + return metadata.AppendToOutgoingContext(ctx, traceContextKey, string(traceContextBinary)) +} + +// TagRPC creates a new trace span for the server side of the RPC. +// +// It checks the incoming gRPC metadata in ctx for a SpanContext, and if +// it finds one, uses that SpanContext as the parent context of the new span. +// +// It returns ctx, with the new trace span added. +func (s *ServerHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + opts := trace.StartOptions{ + Sampler: s.StartOptions.Sampler, + SpanKind: trace.SpanKindServer, + } + + md, _ := metadata.FromIncomingContext(ctx) + name := strings.TrimPrefix(rti.FullMethodName, "/") + name = strings.Replace(name, "/", ".", -1) + traceContext := md[traceContextKey] + var ( + parent trace.SpanContext + haveParent bool + ) + if len(traceContext) > 0 { + // Metadata with keys ending in -bin are actually binary. They are base64 + // encoded before being put on the wire, see: + // https://github.com/grpc/grpc-go/blob/08d6261/Documentation/grpc-metadata.md#storing-binary-data-in-metadata + traceContextBinary := []byte(traceContext[0]) + parent, haveParent = propagation.FromBinary(traceContextBinary) + if haveParent && !s.IsPublicEndpoint { + span := trace.NewSpanWithRemoteParent(name, parent, opts) + return trace.WithSpan(ctx, span) + } + } + span := trace.NewSpan(name, nil, opts) + if haveParent { + span.AddLink(trace.Link{TraceID: parent.TraceID, SpanID: parent.SpanID, Type: trace.LinkTypeChild}) + } + return trace.WithSpan(ctx, span) +} + +func traceHandleRPC(ctx context.Context, rs stats.RPCStats) { + span := trace.FromContext(ctx) + // TODO: compressed and uncompressed sizes are not populated in every message. 
+ switch rs := rs.(type) { + case *stats.Begin: + span.AddAttributes( + trace.BoolAttribute("Client", rs.Client), + trace.BoolAttribute("FailFast", rs.FailFast)) + case *stats.InPayload: + span.AddMessageReceiveEvent(0 /* TODO: messageID */, int64(rs.Length), int64(rs.WireLength)) + case *stats.OutPayload: + span.AddMessageSendEvent(0, int64(rs.Length), int64(rs.WireLength)) + case *stats.End: + if rs.Error != nil { + s, ok := status.FromError(rs.Error) + if ok { + span.SetStatus(trace.Status{Code: int32(s.Code()), Message: s.Message()}) + } else { + span.SetStatus(trace.Status{Code: int32(codes.Internal), Message: rs.Error.Error()}) + } + } + span.End() + } +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client.go b/vendor/go.opencensus.io/plugin/ochttp/client.go new file mode 100644 index 000000000..37f42b3b1 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/client.go @@ -0,0 +1,84 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "net/http" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// Transport is an http.RoundTripper that instruments all outgoing requests with +// stats and tracing. The zero value is intended to be a useful default, but for +// now it's recommended that you explicitly set Propagation. +type Transport struct { + // Base may be set to wrap another http.RoundTripper that does the actual + // requests. By default http.DefaultTransport is used. + // + // If base HTTP roundtripper implements CancelRequest, + // the returned round tripper will be cancelable. + Base http.RoundTripper + + // Propagation defines how traces are propagated. If unspecified, a default + // (currently B3 format) will be used. + Propagation propagation.HTTPFormat + + // StartOptions are applied to the span started by this Transport around each + // request. + // + // StartOptions.SpanKind will always be set to trace.SpanKindClient + // for spans started by this transport. + StartOptions trace.StartOptions + + // TODO: Implement tag propagation for HTTP. +} + +// RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.base() + // TODO: remove excessive nesting of http.RoundTrippers here. + format := t.Propagation + if format == nil { + format = defaultFormat + } + rt = &traceTransport{ + base: rt, + format: format, + startOptions: trace.StartOptions{ + Sampler: t.StartOptions.Sampler, + SpanKind: trace.SpanKindClient, + }, + } + rt = statsTransport{base: rt} + return rt.RoundTrip(req) +} + +func (t *Transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +// CancelRequest cancels an in-flight request by closing its connection. 
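A client-side usage sketch for the Transport above, using only the exported fields it declares; the zero value works, but the propagation format is pinned explicitly here, as the doc comment recommends. Illustrative application code, not part of the vendored package.

package main

import (
	"io"
	"io/ioutil"
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/plugin/ochttp/propagation/b3"
)

func main() {
	client := &http.Client{
		Transport: &ochttp.Transport{
			// Pin the propagation format rather than relying on the default.
			Propagation: &b3.HTTPFormat{},
		},
	}
	resp, err := client.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	// Reading the body to EOF (or closing it) is what ends the client span
	// and records the response size.
	io.Copy(ioutil.Discard, resp.Body)
	resp.Body.Close()
}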
+func (t *Transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + cr.CancelRequest(req) + } +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go new file mode 100644 index 000000000..9b286b929 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go @@ -0,0 +1,125 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "context" + "io" + "net/http" + "strconv" + "sync" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/tag" +) + +// statsTransport is an http.RoundTripper that collects stats for the outgoing requests. +type statsTransport struct { + base http.RoundTripper +} + +// RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request. +func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { + ctx, _ := tag.New(req.Context(), + tag.Upsert(Host, req.URL.Host), + tag.Upsert(Path, req.URL.Path), + tag.Upsert(Method, req.Method)) + req = req.WithContext(ctx) + track := &tracker{ + start: time.Now(), + ctx: ctx, + } + if req.Body == nil { + // TODO: Handle cases where ContentLength is not set. + track.reqSize = -1 + } else if req.ContentLength > 0 { + track.reqSize = req.ContentLength + } + stats.Record(ctx, ClientRequestCount.M(1)) + + // Perform request. + resp, err := t.base.RoundTrip(req) + + if err != nil { + track.statusCode = http.StatusInternalServerError + track.end() + } else { + track.statusCode = resp.StatusCode + if resp.Body == nil { + track.end() + } else { + track.body = resp.Body + resp.Body = track + } + } + return resp, err +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t statsTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base.(canceler); ok { + cr.CancelRequest(req) + } +} + +type tracker struct { + ctx context.Context + respSize int64 + reqSize int64 + start time.Time + body io.ReadCloser + statusCode int + endOnce sync.Once +} + +var _ io.ReadCloser = (*tracker)(nil) + +func (t *tracker) end() { + t.endOnce.Do(func() { + m := []stats.Measurement{ + ClientLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)), + ClientResponseBytes.M(t.respSize), + } + if t.reqSize >= 0 { + m = append(m, ClientRequestBytes.M(t.reqSize)) + } + ctx, _ := tag.New(t.ctx, tag.Upsert(StatusCode, strconv.Itoa(t.statusCode))) + stats.Record(ctx, m...) + }) +} + +func (t *tracker) Read(b []byte) (int, error) { + n, err := t.body.Read(b) + switch err { + case nil: + t.respSize += int64(n) + return n, nil + case io.EOF: + t.end() + } + return n, err +} + +func (t *tracker) Close() error { + // Invoking endSpan on Close will help catch the cases + // in which a read returned a non-nil error, we set the + // span status but didn't end the span. 
+ t.end() + return t.body.Close() +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/doc.go b/vendor/go.opencensus.io/plugin/ochttp/doc.go new file mode 100644 index 000000000..10e626b16 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/doc.go @@ -0,0 +1,19 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ochttp provides OpenCensus instrumentation for net/http package. +// +// For server instrumentation, see Handler. For client-side instrumentation, +// see Transport. +package ochttp // import "go.opencensus.io/plugin/ochttp" diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go new file mode 100644 index 000000000..ddc99460a --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go @@ -0,0 +1,119 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package b3 contains a propagation.HTTPFormat implementation +// for B3 propagation. See https://github.com/openzipkin/b3-propagation +// for more details. +package b3 + +import ( + "encoding/hex" + "net/http" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +const ( + traceIDHeader = "X-B3-TraceId" + spanIDHeader = "X-B3-SpanId" + sampledHeader = "X-B3-Sampled" +) + +// HTTPFormat implements propagation.HTTPFormat to propagate +// traces in HTTP headers in B3 propagation format. +// HTTPFormat skips the X-B3-ParentId and X-B3-Flags headers +// because there are additional fields not represented in the +// OpenCensus span context. Spans created from the incoming +// header will be the direct children of the client-side span. +// Similarly, reciever of the outgoing spans should use client-side +// span created by OpenCensus as the parent. +type HTTPFormat struct{} + +var _ propagation.HTTPFormat = (*HTTPFormat)(nil) + +// SpanContextFromRequest extracts a B3 span context from incoming requests. 
+func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { + tid, ok := parseTraceID(req.Header.Get(traceIDHeader)) + if !ok { + return trace.SpanContext{}, false + } + sid, ok := parseSpanID(req.Header.Get(spanIDHeader)) + if !ok { + return trace.SpanContext{}, false + } + sampled, _ := parseSampled(req.Header.Get(sampledHeader)) + return trace.SpanContext{ + TraceID: tid, + SpanID: sid, + TraceOptions: sampled, + }, true +} + +func parseTraceID(tid string) (trace.TraceID, bool) { + if tid == "" { + return trace.TraceID{}, false + } + b, err := hex.DecodeString(tid) + if err != nil { + return trace.TraceID{}, false + } + var traceID trace.TraceID + if len(b) <= 8 { + // The lower 64-bits. + start := 8 + (8 - len(b)) + copy(traceID[start:], b) + } else { + start := 16 - len(b) + copy(traceID[start:], b) + } + + return traceID, true +} + +func parseSpanID(sid string) (spanID trace.SpanID, ok bool) { + if sid == "" { + return trace.SpanID{}, false + } + b, err := hex.DecodeString(sid) + if err != nil { + return trace.SpanID{}, false + } + start := (8 - len(b)) + copy(spanID[start:], b) + return spanID, true +} + +func parseSampled(sampled string) (trace.TraceOptions, bool) { + switch sampled { + case "true", "1": + return trace.TraceOptions(1), true + default: + return trace.TraceOptions(0), false + } +} + +// SpanContextToRequest modifies the given request to include B3 headers. +func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { + req.Header.Set(traceIDHeader, hex.EncodeToString(sc.TraceID[:])) + req.Header.Set(spanIDHeader, hex.EncodeToString(sc.SpanID[:])) + + var sampled string + if sc.IsSampled() { + sampled = "1" + } else { + sampled = "0" + } + req.Header.Set(sampledHeader, sampled) +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go new file mode 100644 index 000000000..a92c7c1ed --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/server.go @@ -0,0 +1,193 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "bufio" + "context" + "errors" + "net" + "net/http" + "strconv" + "sync" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/tag" + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// Handler is a http.Handler that is aware of the incoming request's span. +// +// The extracted span can be accessed from the incoming request's +// context. +// +// span := trace.FromContext(r.Context()) +// +// The server span will be automatically ended at the end of ServeHTTP. +// +// Incoming propagation mechanism is determined by the given HTTP propagators. +type Handler struct { + // Propagation defines how traces are propagated. If unspecified, + // B3 propagation will be used. + Propagation propagation.HTTPFormat + + // Handler is the handler used to handle the incoming request. 
+ Handler http.Handler + + // StartOptions are applied to the span started by this Handler around each + // request. + // + // StartOptions.SpanKind will always be set to trace.SpanKindServer + // for spans started by this transport. + StartOptions trace.StartOptions + + // IsPublicEndpoint should be set to true for publicly accessible HTTP(S) + // servers. If true, any trace metadata set on the incoming request will + // be added as a linked trace instead of being added as a parent of the + // current trace. + IsPublicEndpoint bool +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + var traceEnd, statsEnd func() + r, traceEnd = h.startTrace(w, r) + defer traceEnd() + w, statsEnd = h.startStats(w, r) + defer statsEnd() + handler := h.Handler + if handler == nil { + handler = http.DefaultServeMux + } + handler.ServeHTTP(w, r) +} + +func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) { + opts := trace.StartOptions{ + Sampler: h.StartOptions.Sampler, + SpanKind: trace.SpanKindServer, + } + + name := spanNameFromURL(r.URL) + ctx := r.Context() + var span *trace.Span + sc, ok := h.extractSpanContext(r) + if ok && !h.IsPublicEndpoint { + span = trace.NewSpanWithRemoteParent(name, sc, opts) + ctx = trace.WithSpan(ctx, span) + } else { + span = trace.NewSpan(name, nil, opts) + if ok { + span.AddLink(trace.Link{ + TraceID: sc.TraceID, + SpanID: sc.SpanID, + Type: trace.LinkTypeChild, + Attributes: nil, + }) + } + } + ctx = trace.WithSpan(ctx, span) + span.AddAttributes(requestAttrs(r)...) + return r.WithContext(trace.WithSpan(r.Context(), span)), span.End +} + +func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) { + if h.Propagation == nil { + return defaultFormat.SpanContextFromRequest(r) + } + return h.Propagation.SpanContextFromRequest(r) +} + +func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func()) { + ctx, _ := tag.New(r.Context(), + tag.Upsert(Host, r.URL.Host), + tag.Upsert(Path, r.URL.Path), + tag.Upsert(Method, r.Method)) + track := &trackingResponseWriter{ + start: time.Now(), + ctx: ctx, + writer: w, + } + if r.Body == nil { + // TODO: Handle cases where ContentLength is not set. + track.reqSize = -1 + } else if r.ContentLength > 0 { + track.reqSize = r.ContentLength + } + stats.Record(ctx, ServerRequestCount.M(1)) + return track, track.end +} + +type trackingResponseWriter struct { + ctx context.Context + reqSize int64 + respSize int64 + start time.Time + statusCode int + endOnce sync.Once + writer http.ResponseWriter +} + +var _ http.ResponseWriter = (*trackingResponseWriter)(nil) +var _ http.Hijacker = (*trackingResponseWriter)(nil) + +var errHijackerUnimplemented = errors.New("ResponseWriter does not implement http.Hijacker") + +func (t *trackingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hj, ok := t.writer.(http.Hijacker) + if !ok { + return nil, nil, errHijackerUnimplemented + } + return hj.Hijack() +} + +func (t *trackingResponseWriter) end() { + t.endOnce.Do(func() { + if t.statusCode == 0 { + t.statusCode = 200 + } + m := []stats.Measurement{ + ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)), + ServerResponseBytes.M(t.respSize), + } + if t.reqSize >= 0 { + m = append(m, ServerRequestBytes.M(t.reqSize)) + } + ctx, _ := tag.New(t.ctx, tag.Upsert(StatusCode, strconv.Itoa(t.statusCode))) + stats.Record(ctx, m...) 
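A server-side sketch mirroring the Handler documented above, assuming a standard net/http mux; the wrapped handler recovers the server span exactly as the Handler doc comment shows. The route and attribute names are illustrative.

package main

import (
	"fmt"
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/plugin/ochttp/propagation/b3"
	"go.opencensus.io/trace"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
		// The server span started by ochttp.Handler is on the request context.
		span := trace.FromContext(r.Context())
		span.AddAttributes(trace.StringAttribute("example.com/handler", "hello"))
		fmt.Fprintln(w, "hello")
	})

	h := &ochttp.Handler{
		Handler:     mux,
		Propagation: &b3.HTTPFormat{},
		// On Internet-facing servers, treat incoming trace headers as links
		// rather than parents.
		IsPublicEndpoint: true,
	}
	log.Fatal(http.ListenAndServe(":8080", h))
}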
+ }) +} + +func (t *trackingResponseWriter) Header() http.Header { + return t.writer.Header() +} + +func (t *trackingResponseWriter) Write(data []byte) (int, error) { + n, err := t.writer.Write(data) + t.respSize += int64(n) + return n, err +} + +func (t *trackingResponseWriter) WriteHeader(statusCode int) { + t.writer.WriteHeader(statusCode) + t.statusCode = statusCode +} + +func (t *trackingResponseWriter) Flush() { + if flusher, ok := t.writer.(http.Flusher); ok { + flusher.Flush() + } +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go new file mode 100644 index 000000000..803a606f2 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/stats.go @@ -0,0 +1,173 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +// The following client HTTP measures are supported for use in custom views. +var ( + ClientRequestCount = stats.Int64("opencensus.io/http/client/request_count", "Number of HTTP requests started", stats.UnitNone) + ClientRequestBytes = stats.Int64("opencensus.io/http/client/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes) + ClientResponseBytes = stats.Int64("opencensus.io/http/client/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes) + ClientLatency = stats.Float64("opencensus.io/http/client/latency", "End-to-end latency", stats.UnitMilliseconds) +) + +// The following server HTTP measures are supported for use in custom views: +var ( + ServerRequestCount = stats.Int64("opencensus.io/http/server/request_count", "Number of HTTP requests started", stats.UnitNone) + ServerRequestBytes = stats.Int64("opencensus.io/http/server/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes) + ServerResponseBytes = stats.Int64("opencensus.io/http/server/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes) + ServerLatency = stats.Float64("opencensus.io/http/server/latency", "End-to-end latency", stats.UnitMilliseconds) +) + +// The following tags are applied to stats recorded by this package. Host, Path +// and Method are applied to all measures. StatusCode is not applied to +// ClientRequestCount or ServerRequestCount, since it is recorded before the status is known. +var ( + // Host is the value of the HTTP Host header. + Host, _ = tag.NewKey("http.host") + + // StatusCode is the numeric HTTP response status code, + // or "error" if a transport error occurred and no status code was read. + StatusCode, _ = tag.NewKey("http.status") + + // Path is the URL path (not including query string) in the request. + Path, _ = tag.NewKey("http.path") + + // Method is the HTTP method of the request, capitalized (GET, POST, etc.). + Method, _ = tag.NewKey("http.method") +) + +// Default distributions used by views in this package. 
+var ( + DefaultSizeDistribution = view.Distribution(0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) + DefaultLatencyDistribution = view.Distribution(0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) +) + +// Package ochttp provides some convenience views. +// You need to subscribe to the views for data to actually be collected. +var ( + ClientRequestCountView = &view.View{ + Name: "opencensus.io/http/client/request_count", + Description: "Count of HTTP requests started", + Measure: ClientRequestCount, + Aggregation: view.Count(), + } + + ClientRequestBytesView = &view.View{ + Name: "opencensus.io/http/client/request_bytes", + Description: "Size distribution of HTTP request body", + Measure: ClientRequestBytes, + Aggregation: DefaultSizeDistribution, + } + + ClientResponseBytesView = &view.View{ + Name: "opencensus.io/http/client/response_bytes", + Description: "Size distribution of HTTP response body", + Measure: ClientResponseBytes, + Aggregation: DefaultSizeDistribution, + } + + ClientLatencyView = &view.View{ + Name: "opencensus.io/http/client/latency", + Description: "Latency distribution of HTTP requests", + Measure: ClientLatency, + Aggregation: DefaultLatencyDistribution, + } + + ClientRequestCountByMethod = &view.View{ + Name: "opencensus.io/http/client/request_count_by_method", + Description: "Client request count by HTTP method", + TagKeys: []tag.Key{Method}, + Measure: ClientRequestCount, + Aggregation: view.Count(), + } + + ClientResponseCountByStatusCode = &view.View{ + Name: "opencensus.io/http/client/response_count_by_status_code", + Description: "Client response count by status code", + TagKeys: []tag.Key{StatusCode}, + Measure: ClientLatency, + Aggregation: view.Count(), + } + + ServerRequestCountView = &view.View{ + Name: "opencensus.io/http/server/request_count", + Description: "Count of HTTP requests started", + Measure: ServerRequestCount, + Aggregation: view.Count(), + } + + ServerRequestBytesView = &view.View{ + Name: "opencensus.io/http/server/request_bytes", + Description: "Size distribution of HTTP request body", + Measure: ServerRequestBytes, + Aggregation: DefaultSizeDistribution, + } + + ServerResponseBytesView = &view.View{ + Name: "opencensus.io/http/server/response_bytes", + Description: "Size distribution of HTTP response body", + Measure: ServerResponseBytes, + Aggregation: DefaultSizeDistribution, + } + + ServerLatencyView = &view.View{ + Name: "opencensus.io/http/server/latency", + Description: "Latency distribution of HTTP requests", + Measure: ServerLatency, + Aggregation: DefaultLatencyDistribution, + } + + ServerRequestCountByMethod = &view.View{ + Name: "opencensus.io/http/server/request_count_by_method", + Description: "Server request count by HTTP method", + TagKeys: []tag.Key{Method}, + Measure: ServerRequestCount, + Aggregation: view.Count(), + } + + ServerResponseCountByStatusCode = &view.View{ + Name: "opencensus.io/http/server/response_count_by_status_code", + Description: "Server response count by status code", + TagKeys: []tag.Key{StatusCode}, + Measure: ServerLatency, + Aggregation: view.Count(), + } +) + +// DefaultClientViews are the default client views provided by this package. 
+var DefaultClientViews = []*view.View{ + ClientRequestCountView, + ClientRequestBytesView, + ClientResponseBytesView, + ClientLatencyView, + ClientRequestCountByMethod, + ClientResponseCountByStatusCode, +} + +// DefaultServerViews are the default server views provided by this package. +var DefaultServerViews = []*view.View{ + ServerRequestCountView, + ServerRequestBytesView, + ServerResponseBytesView, + ServerLatencyView, + ServerRequestCountByMethod, + ServerResponseCountByStatusCode, +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go new file mode 100644 index 000000000..10d4a7060 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/trace.go @@ -0,0 +1,215 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "io" + "net/http" + "net/url" + + "go.opencensus.io/plugin/ochttp/propagation/b3" + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// TODO(jbd): Add godoc examples. + +var defaultFormat propagation.HTTPFormat = &b3.HTTPFormat{} + +// Attributes recorded on the span for the requests. +// Only trace exporters will need them. +const ( + HostAttribute = "http.host" + MethodAttribute = "http.method" + PathAttribute = "http.path" + UserAgentAttribute = "http.user_agent" + StatusCodeAttribute = "http.status_code" +) + +type traceTransport struct { + base http.RoundTripper + startOptions trace.StartOptions + format propagation.HTTPFormat +} + +// TODO(jbd): Add message events for request and response size. + +// RoundTrip creates a trace.Span and inserts it into the outgoing request's headers. +// The created span can follow a parent span, if a parent is presented in +// the request's context. +func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { + name := spanNameFromURL(req.URL) + // TODO(jbd): Discuss whether we want to prefix + // outgoing requests with Sent. + parent := trace.FromContext(req.Context()) + span := trace.NewSpan(name, parent, t.startOptions) + req = req.WithContext(trace.WithSpan(req.Context(), span)) + + if t.format != nil { + t.format.SpanContextToRequest(span.SpanContext(), req) + } + + span.AddAttributes(requestAttrs(req)...) + resp, err := t.base.RoundTrip(req) + if err != nil { + span.SetStatus(trace.Status{Code: 2, Message: err.Error()}) + span.End() + return resp, err + } + + span.AddAttributes(responseAttrs(resp)...) + span.SetStatus(status(resp.StatusCode)) + + // span.End() will be invoked after + // a read from resp.Body returns io.EOF or when + // resp.Body.Close() is invoked. + resp.Body = &bodyTracker{rc: resp.Body, span: span} + return resp, err +} + +// bodyTracker wraps a response.Body and invokes +// trace.EndSpan on encountering io.EOF on reading +// the body of the original response. 
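None of these views collect data until they are subscribed. A minimal sketch, again assuming view.Register is the registration call in this release (older releases used view.Subscribe):

package main

import (
	"log"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/stats/view"
)

func main() {
	// Register both the client and server HTTP views so the measures above
	// are aggregated and become visible to any registered exporter.
	views := append([]*view.View{}, ochttp.DefaultClientViews...)
	views = append(views, ochttp.DefaultServerViews...)
	if err := view.Register(views...); err != nil {
		log.Fatalf("cannot register ochttp views: %v", err)
	}
}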
+type bodyTracker struct { + rc io.ReadCloser + span *trace.Span +} + +var _ io.ReadCloser = (*bodyTracker)(nil) + +func (bt *bodyTracker) Read(b []byte) (int, error) { + n, err := bt.rc.Read(b) + + switch err { + case nil: + return n, nil + case io.EOF: + bt.span.End() + default: + // For all other errors, set the span status + bt.span.SetStatus(trace.Status{ + // Code 2 is the error code for Internal server error. + Code: 2, + Message: err.Error(), + }) + } + return n, err +} + +func (bt *bodyTracker) Close() error { + // Invoking endSpan on Close will help catch the cases + // in which a read returned a non-nil error, we set the + // span status but didn't end the span. + bt.span.End() + return bt.rc.Close() +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *traceTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base.(canceler); ok { + cr.CancelRequest(req) + } +} + +func spanNameFromURL(u *url.URL) string { + return u.Path +} + +func requestAttrs(r *http.Request) []trace.Attribute { + return []trace.Attribute{ + trace.StringAttribute(PathAttribute, r.URL.Path), + trace.StringAttribute(HostAttribute, r.URL.Host), + trace.StringAttribute(MethodAttribute, r.Method), + trace.StringAttribute(UserAgentAttribute, r.UserAgent()), + } +} + +func responseAttrs(resp *http.Response) []trace.Attribute { + return []trace.Attribute{ + trace.Int64Attribute(StatusCodeAttribute, int64(resp.StatusCode)), + } +} + +func status(statusCode int) trace.Status { + var code int32 + if statusCode < 200 || statusCode >= 400 { + code = codeUnknown + } + switch statusCode { + case 499: + code = codeCancelled + case http.StatusBadRequest: + code = codeInvalidArgument + case http.StatusGatewayTimeout: + code = codeDeadlineExceeded + case http.StatusNotFound: + code = codeNotFound + case http.StatusForbidden: + code = codePermissionDenied + case http.StatusUnauthorized: // 401 is actually unauthenticated. + code = codeUnathenticated + case http.StatusTooManyRequests: + code = codeResourceExhausted + case http.StatusNotImplemented: + code = codeUnimplemented + case http.StatusServiceUnavailable: + code = codeUnavailable + } + return trace.Status{Code: code, Message: codeToStr[code]} +} + +// TODO(jbd): Provide status codes from trace package. 
+const ( + codeOK = 0 + codeCancelled = 1 + codeUnknown = 2 + codeInvalidArgument = 3 + codeDeadlineExceeded = 4 + codeNotFound = 5 + codeAlreadyExists = 6 + codePermissionDenied = 7 + codeResourceExhausted = 8 + codeFailedPrecondition = 9 + codeAborted = 10 + codeOutOfRange = 11 + codeUnimplemented = 12 + codeInternal = 13 + codeUnavailable = 14 + codeDataLoss = 15 + codeUnathenticated = 16 +) + +var codeToStr = map[int32]string{ + codeOK: `"OK"`, + codeCancelled: `"CANCELLED"`, + codeUnknown: `"UNKNOWN"`, + codeInvalidArgument: `"INVALID_ARGUMENT"`, + codeDeadlineExceeded: `"DEADLINE_EXCEEDED"`, + codeNotFound: `"NOT_FOUND"`, + codeAlreadyExists: `"ALREADY_EXISTS"`, + codePermissionDenied: `"PERMISSION_DENIED"`, + codeResourceExhausted: `"RESOURCE_EXHAUSTED"`, + codeFailedPrecondition: `"FAILED_PRECONDITION"`, + codeAborted: `"ABORTED"`, + codeOutOfRange: `"OUT_OF_RANGE"`, + codeUnimplemented: `"UNIMPLEMENTED"`, + codeInternal: `"INTERNAL"`, + codeUnavailable: `"UNAVAILABLE"`, + codeDataLoss: `"DATA_LOSS"`, + codeUnathenticated: `"UNAUTHENTICATED"`, +} diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go new file mode 100644 index 000000000..7a8a62c14 --- /dev/null +++ b/vendor/go.opencensus.io/stats/doc.go @@ -0,0 +1,55 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +/* +Package stats contains support for OpenCensus stats recording. + +OpenCensus allows users to create typed measures, record measurements, +aggregate the collected data, and export the aggregated data. + +Measures + +A measure represents a type of metric to be tracked and recorded. +For example, latency, request Mb/s, and response Mb/s are measures +to collect from a server. + +Each measure needs to be registered before being used. Measure +constructors such as Int64 and Float64 automatically +register the measure by the given name. Each registered measure needs +to be unique by name. Measures also have a description and a unit. + +Libraries can define and export measures for their end users to +create views and collect instrumentation data. + +Recording measurements + +Measurement is a data point to be collected for a measure. For example, +for a latency (ms) measure, 100 is a measurement that represents a 100ms +latency event. Users collect data points on the existing measures with +the current context. Tags from the current context are recorded with the +measurements if they are any. + +Recorded measurements are dropped immediately if user is not aggregating +them via views. Users don't necessarily need to conditionally enable/disable +recording to reduce cost. Recording of measurements is cheap. + +Libraries can always record measurements, and end-users can later decide +on which measurements they want to collect by registering views. This allows +libraries to turn on the instrumentation by default. 
+*/ +package stats // import "go.opencensus.io/stats" + +// TODO(acetechnologist): Add a link to the language independent OpenCensus +// spec when it is available. diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go new file mode 100644 index 000000000..6341eb2ad --- /dev/null +++ b/vendor/go.opencensus.io/stats/internal/record.go @@ -0,0 +1,25 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "go.opencensus.io/tag" +) + +// DefaultRecorder will be called for each Record call. +var DefaultRecorder func(*tag.Map, interface{}) + +// SubscriptionReporter reports when a view subscribed with a measure. +var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/stats/internal/validation.go b/vendor/go.opencensus.io/stats/internal/validation.go new file mode 100644 index 000000000..b962d524f --- /dev/null +++ b/vendor/go.opencensus.io/stats/internal/validation.go @@ -0,0 +1,28 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +const ( + MaxNameLength = 255 +) + +func IsPrintable(str string) bool { + for _, r := range str { + if !(r >= ' ' && r <= '~') { + return false + } + } + return true +} diff --git a/vendor/go.opencensus.io/stats/measure.go b/vendor/go.opencensus.io/stats/measure.go new file mode 100644 index 000000000..aa555c209 --- /dev/null +++ b/vendor/go.opencensus.io/stats/measure.go @@ -0,0 +1,96 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +import ( + "sync" + "sync/atomic" +) + +// Measure represents a type of metric to be tracked and recorded. +// For example, latency, request Mb/s, and response Mb/s are measures +// to collect from a server. +// +// Each measure needs to be registered before being used. 
+// Measure constructors such as Int64 and +// Float64 automatically registers the measure +// by the given name. +// Each registered measure needs to be unique by name. +// Measures also have a description and a unit. +type Measure interface { + Name() string + Description() string + Unit() string +} + +// measureDescriptor is the untyped descriptor associated with each measure. +// Int64Measure and Float64Measure wrap measureDescriptor to provide typed +// recording APIs. +// Two Measures with the same name will have the same measureDescriptor. +type measureDescriptor struct { + subs int32 // access atomically + + name string + description string + unit string +} + +func (m *measureDescriptor) subscribe() { + atomic.StoreInt32(&m.subs, 1) +} + +func (m *measureDescriptor) subscribed() bool { + return atomic.LoadInt32(&m.subs) == 1 +} + +var ( + mu sync.RWMutex + measures = make(map[string]*measureDescriptor) +) + +func registerMeasureHandle(name, desc, unit string) *measureDescriptor { + mu.Lock() + defer mu.Unlock() + + if stored, ok := measures[name]; ok { + return stored + } + m := &measureDescriptor{ + name: name, + description: desc, + unit: unit, + } + measures[name] = m + return m +} + +// Measurement is the numeric value measured when recording stats. Each measure +// provides methods to create measurements of their kind. For example, Int64Measure +// provides M to convert an int64 into a measurement. +type Measurement struct { + v float64 + m Measure +} + +// Value returns the value of the Measurement as a float64. +func (m Measurement) Value() float64 { + return m.v +} + +// Measure returns the Measure from which this Measurement was created. +func (m Measurement) Measure() Measure { + return m.m +} diff --git a/vendor/go.opencensus.io/stats/measure_float64.go b/vendor/go.opencensus.io/stats/measure_float64.go new file mode 100644 index 000000000..8de6b5221 --- /dev/null +++ b/vendor/go.opencensus.io/stats/measure_float64.go @@ -0,0 +1,52 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +// Float64Measure is a measure of type float64. +type Float64Measure struct { + md *measureDescriptor +} + +// Name returns the name of the measure. +func (m *Float64Measure) Name() string { + return m.md.name +} + +// Description returns the description of the measure. +func (m *Float64Measure) Description() string { + return m.md.description +} + +// Unit returns the unit of the measure. +func (m *Float64Measure) Unit() string { + return m.md.unit +} + +// M creates a new float64 measurement. +// Use Record to record measurements. +func (m *Float64Measure) M(v float64) Measurement { + if !m.md.subscribed() { + return Measurement{} + } + return Measurement{m: m, v: v} +} + +// Float64 creates a new measure of type Float64Measure. +// It never returns an error. 
+func Float64(name, description, unit string) *Float64Measure { + mi := registerMeasureHandle(name, description, unit) + return &Float64Measure{mi} +} diff --git a/vendor/go.opencensus.io/stats/measure_int64.go b/vendor/go.opencensus.io/stats/measure_int64.go new file mode 100644 index 000000000..b6fd25f0d --- /dev/null +++ b/vendor/go.opencensus.io/stats/measure_int64.go @@ -0,0 +1,52 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +// Int64Measure is a measure of type int64. +type Int64Measure struct { + md *measureDescriptor +} + +// Name returns the name of the measure. +func (m *Int64Measure) Name() string { + return m.md.name +} + +// Description returns the description of the measure. +func (m *Int64Measure) Description() string { + return m.md.description +} + +// Unit returns the unit of the measure. +func (m *Int64Measure) Unit() string { + return m.md.unit +} + +// M creates a new int64 measurement. +// Use Record to record measurements. +func (m *Int64Measure) M(v int64) Measurement { + if !m.md.subscribed() { + return Measurement{} + } + return Measurement{m: m, v: float64(v)} +} + +// Int64 creates a new measure of type Int64Measure. +// It never returns an error. +func Int64(name, description, unit string) *Int64Measure { + mi := registerMeasureHandle(name, description, unit) + return &Int64Measure{mi} +} diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go new file mode 100644 index 000000000..98865ff69 --- /dev/null +++ b/vendor/go.opencensus.io/stats/record.go @@ -0,0 +1,52 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +import ( + "context" + + "go.opencensus.io/stats/internal" + "go.opencensus.io/tag" +) + +func init() { + internal.SubscriptionReporter = func(measure string) { + mu.Lock() + measures[measure].subscribe() + mu.Unlock() + } +} + +// Record records one or multiple measurements with the same tags at once. +// If there are any tags in the context, measurements will be tagged with them. 
+func Record(ctx context.Context, ms ...Measurement) { + if len(ms) == 0 { + return + } + var record bool + for _, m := range ms { + if (m != Measurement{}) { + record = true + break + } + } + if !record { + return + } + if internal.DefaultRecorder != nil { + internal.DefaultRecorder(tag.FromContext(ctx), ms) + } +} diff --git a/vendor/go.opencensus.io/stats/units.go b/vendor/go.opencensus.io/stats/units.go new file mode 100644 index 000000000..d37e2152f --- /dev/null +++ b/vendor/go.opencensus.io/stats/units.go @@ -0,0 +1,24 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +// Units are encoded according to the case-sensitive abbreviations from the +// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html +const ( + UnitNone = "1" + UnitBytes = "By" + UnitMilliseconds = "ms" +) diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go new file mode 100644 index 000000000..b7f169b4a --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/aggregation.go @@ -0,0 +1,120 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +// AggType represents the type of aggregation function used on a View. +type AggType int + +// All available aggregation types. +const ( + AggTypeNone AggType = iota // no aggregation; reserved for future use. + AggTypeCount // the count aggregation, see Count. + AggTypeSum // the sum aggregation, see Sum. + AggTypeDistribution // the distribution aggregation, see Distribution. + AggTypeLastValue // the last value aggregation, see LastValue. +) + +func (t AggType) String() string { + return aggTypeName[t] +} + +var aggTypeName = map[AggType]string{ + AggTypeNone: "None", + AggTypeCount: "Count", + AggTypeSum: "Sum", + AggTypeDistribution: "Distribution", + AggTypeLastValue: "LastValue", +} + +// Aggregation represents a data aggregation method. Use one of the functions: +// Count, Sum, or Distribution to construct an Aggregation. +type Aggregation struct { + Type AggType // Type is the AggType of this Aggregation. + Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution. 
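Putting the recording pieces together, a hedged sketch of defining a measure, tagging the context, and recording a measurement; the measure name and tag key are illustrative only and are not defined by this package.

package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/tag"
)

// videoSize is an example measure; the name is illustrative.
var videoSize = stats.Int64("example.com/measure/video_size", "Size of processed videos", stats.UnitBytes)

// frontendKey is an example tag key.
var frontendKey, _ = tag.NewKey("example.com/keys/frontend")

func main() {
	ctx, err := tag.New(context.Background(), tag.Upsert(frontendKey, "mobile-ios"))
	if err != nil {
		log.Fatal(err)
	}
	// The measurement is dropped unless some view over videoSize has been
	// registered; recording itself is cheap.
	stats.Record(ctx, videoSize.M(25*1024*1024))
}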
+ + newData func() AggregationData +} + +var ( + aggCount = &Aggregation{ + Type: AggTypeCount, + newData: func() AggregationData { + return &CountData{} + }, + } + aggSum = &Aggregation{ + Type: AggTypeSum, + newData: func() AggregationData { + return &SumData{} + }, + } +) + +// Count indicates that data collected and aggregated +// with this method will be turned into a count value. +// For example, total number of accepted requests can be +// aggregated by using Count. +func Count() *Aggregation { + return aggCount +} + +// Sum indicates that data collected and aggregated +// with this method will be summed up. +// For example, accumulated request bytes can be aggregated by using +// Sum. +func Sum() *Aggregation { + return aggSum +} + +// Distribution indicates that the desired aggregation is +// a histogram distribution. +// +// An distribution aggregation may contain a histogram of the values in the +// population. The bucket boundaries for that histogram are described +// by the bounds. This defines len(bounds)+1 buckets. +// +// If len(bounds) >= 2 then the boundaries for bucket index i are: +// +// [-infinity, bounds[i]) for i = 0 +// [bounds[i-1], bounds[i]) for 0 < i < length +// [bounds[i-1], +infinity) for i = length +// +// If len(bounds) is 0 then there is no histogram associated with the +// distribution. There will be a single bucket with boundaries +// (-infinity, +infinity). +// +// If len(bounds) is 1 then there is no finite buckets, and that single +// element is the common boundary of the overflow and underflow buckets. +func Distribution(bounds ...float64) *Aggregation { + return &Aggregation{ + Type: AggTypeDistribution, + Buckets: bounds, + newData: func() AggregationData { + return newDistributionData(bounds) + }, + } +} + +// LastValue only reports the last value recorded using this +// aggregation. All other measurements will be dropped. +func LastValue() *Aggregation { + return &Aggregation{ + Type: AggTypeLastValue, + newData: func() AggregationData { + return &LastValueData{} + }, + } +} diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go new file mode 100644 index 000000000..88c500bff --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/aggregation_data.go @@ -0,0 +1,207 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "math" +) + +// AggregationData represents an aggregated value from a collection. +// They are reported on the view data during exporting. +// Mosts users won't directly access aggregration data. +type AggregationData interface { + isAggregationData() bool + addSample(v float64) + clone() AggregationData + equal(other AggregationData) bool +} + +const epsilon = 1e-9 + +// CountData is the aggregated data for the Count aggregation. +// A count aggregation processes data and counts the recordings. +// +// Most users won't directly access count data. 
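A sketch of wiring a Distribution aggregation into a view, reusing the illustrative measure and tag key names from the recording sketch earlier and assuming view.Register is available. Per the bucket rule above, bounds of 0, 1 KiB and 1 MiB define four buckets: (-inf, 0), [0, 1 KiB), [1 KiB, 1 MiB) and [1 MiB, +inf).

package main

import (
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)

// videoSize and frontendKey match the illustrative names used in the earlier
// recording sketch; they are not part of this package.
var videoSize = stats.Int64("example.com/measure/video_size", "Size of processed videos", stats.UnitBytes)
var frontendKey, _ = tag.NewKey("example.com/keys/frontend")

func main() {
	v := &view.View{
		Name:        "example.com/views/video_size_distribution",
		Description: "Distribution of processed video sizes, by frontend",
		TagKeys:     []tag.Key{frontendKey},
		Measure:     videoSize,
		// Bounds 0, 1 KiB and 1 MiB yield the buckets
		// (-inf, 0), [0, 1 KiB), [1 KiB, 1 MiB) and [1 MiB, +inf).
		Aggregation: view.Distribution(0, 1<<10, 1<<20),
	}
	if err := view.Register(v); err != nil {
		log.Fatalf("cannot register view: %v", err)
	}
}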
+type CountData struct { + Value int64 +} + +func (a *CountData) isAggregationData() bool { return true } + +func (a *CountData) addSample(v float64) { + a.Value = a.Value + 1 +} + +func (a *CountData) clone() AggregationData { + return &CountData{Value: a.Value} +} + +func (a *CountData) equal(other AggregationData) bool { + a2, ok := other.(*CountData) + if !ok { + return false + } + + return a.Value == a2.Value +} + +// SumData is the aggregated data for the Sum aggregation. +// A sum aggregation processes data and sums up the recordings. +// +// Most users won't directly access sum data. +type SumData struct { + Value float64 +} + +func (a *SumData) isAggregationData() bool { return true } + +func (a *SumData) addSample(f float64) { + a.Value += f +} + +func (a *SumData) clone() AggregationData { + return &SumData{Value: a.Value} +} + +func (a *SumData) equal(other AggregationData) bool { + a2, ok := other.(*SumData) + if !ok { + return false + } + return math.Pow(a.Value-a2.Value, 2) < epsilon +} + +// DistributionData is the aggregated data for the +// Distribution aggregation. +// +// Most users won't directly access distribution data. +type DistributionData struct { + Count int64 // number of data points aggregated + Min float64 // minimum value in the distribution + Max float64 // max value in the distribution + Mean float64 // mean of the distribution + SumOfSquaredDev float64 // sum of the squared deviation from the mean + CountPerBucket []int64 // number of occurrences per bucket + bounds []float64 // histogram distribution of the values +} + +func newDistributionData(bounds []float64) *DistributionData { + return &DistributionData{ + CountPerBucket: make([]int64, len(bounds)+1), + bounds: bounds, + Min: math.MaxFloat64, + Max: math.SmallestNonzeroFloat64, + } +} + +// Sum returns the sum of all samples collected. +func (a *DistributionData) Sum() float64 { return a.Mean * float64(a.Count) } + +func (a *DistributionData) variance() float64 { + if a.Count <= 1 { + return 0 + } + return a.SumOfSquaredDev / float64(a.Count-1) +} + +func (a *DistributionData) isAggregationData() bool { return true } + +func (a *DistributionData) addSample(f float64) { + if f < a.Min { + a.Min = f + } + if f > a.Max { + a.Max = f + } + a.Count++ + a.incrementBucketCount(f) + + if a.Count == 1 { + a.Mean = f + return + } + + oldMean := a.Mean + a.Mean = a.Mean + (f-a.Mean)/float64(a.Count) + a.SumOfSquaredDev = a.SumOfSquaredDev + (f-oldMean)*(f-a.Mean) +} + +func (a *DistributionData) incrementBucketCount(f float64) { + if len(a.bounds) == 0 { + a.CountPerBucket[0]++ + return + } + + for i, b := range a.bounds { + if f < b { + a.CountPerBucket[i]++ + return + } + } + a.CountPerBucket[len(a.bounds)]++ +} + +func (a *DistributionData) clone() AggregationData { + counts := make([]int64, len(a.CountPerBucket)) + copy(counts, a.CountPerBucket) + c := *a + c.CountPerBucket = counts + return &c +} + +func (a *DistributionData) equal(other AggregationData) bool { + a2, ok := other.(*DistributionData) + if !ok { + return false + } + if a2 == nil { + return false + } + if len(a.CountPerBucket) != len(a2.CountPerBucket) { + return false + } + for i := range a.CountPerBucket { + if a.CountPerBucket[i] != a2.CountPerBucket[i] { + return false + } + } + return a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon +} + +// LastValueData returns the last value recorded for LastValue aggregation. 
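The addSample update above is Welford's online algorithm: the mean and the sum of squared deviations are maintained incrementally, so variance() never needs the raw samples. A standalone illustration of the same arithmetic (not part of the package API):

    package main

    import "fmt"

    // The same incremental update used by DistributionData.addSample:
    //   mean' = mean + (x-mean)/n
    //   M2'   = M2 + (x-oldMean)*(x-mean')
    // with the sample variance recovered as M2/(n-1).
    func main() {
        samples := []float64{3, 5, 7, 9}
        var (
            count int64
            mean  float64
            m2    float64 // sum of squared deviations from the mean
        )
        for _, x := range samples {
            count++
            if count == 1 {
                mean = x
                continue
            }
            oldMean := mean
            mean += (x - oldMean) / float64(count)
            m2 += (x - oldMean) * (x - mean)
        }
        fmt.Printf("count=%d mean=%g variance=%g\n", count, mean, m2/float64(count-1))
        // Prints: count=4 mean=6 variance=6.666666666666667
    }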
+type LastValueData struct { + Value float64 +} + +func (l *LastValueData) isAggregationData() bool { + return true +} + +func (l *LastValueData) addSample(v float64) { + l.Value = v +} + +func (l *LastValueData) clone() AggregationData { + return &LastValueData{l.Value} +} + +func (l *LastValueData) equal(other AggregationData) bool { + a2, ok := other.(*LastValueData) + if !ok { + return false + } + return l.Value == a2.Value +} diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go new file mode 100644 index 000000000..863a5b62a --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/collector.go @@ -0,0 +1,84 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "sort" + + "go.opencensus.io/internal/tagencoding" + "go.opencensus.io/tag" +) + +type collector struct { + // signatures holds the aggregations values for each unique tag signature + // (values for all keys) to its aggregator. + signatures map[string]AggregationData + // Aggregation is the description of the aggregation to perform for this + // view. + a *Aggregation +} + +func (c *collector) addSample(s string, v float64) { + aggregator, ok := c.signatures[s] + if !ok { + aggregator = c.a.newData() + c.signatures[s] = aggregator + } + aggregator.addSample(v) +} + +func (c *collector) collectedRows(keys []tag.Key) []*Row { + var rows []*Row + for sig, aggregator := range c.signatures { + tags := decodeTags([]byte(sig), keys) + row := &Row{tags, aggregator} + rows = append(rows, row) + } + return rows +} + +func (c *collector) clearRows() { + c.signatures = make(map[string]AggregationData) +} + +// encodeWithKeys encodes the map by using values +// only associated with the keys provided. +func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte { + vb := &tagencoding.Values{ + Buffer: make([]byte, len(keys)), + } + for _, k := range keys { + v, _ := m.Value(k) + vb.WriteValue([]byte(v)) + } + return vb.Bytes() +} + +// decodeTags decodes tags from the buffer and +// orders them by the keys. +func decodeTags(buf []byte, keys []tag.Key) []tag.Tag { + vb := &tagencoding.Values{Buffer: buf} + var tags []tag.Tag + for _, k := range keys { + v := vb.ReadValue() + if v != nil { + tags = append(tags, tag.Tag{Key: k, Value: string(v)}) + } + } + vb.ReadIndex = 0 + sort.Slice(tags, func(i, j int) bool { return tags[i].Key.Name() < tags[j].Key.Name() }) + return tags +} diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go new file mode 100644 index 000000000..856fb4e15 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/doc.go @@ -0,0 +1,46 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +/* +Package view contains support for collecting and exposing aggregates over stats. + +In order to collect measurements, views need to be defined and registered. +A view allows recorded measurements to be filtered and aggregated over a time window. + +All recorded measurements can be filtered by a list of tags. + +OpenCensus provides several aggregation methods: count, distribution and sum. +Count aggregation only counts the number of measurement points. Distribution +aggregation provides statistical summary of the aggregated data. Sum distribution +sums up the measurement points. Aggregations are cumulative. + +Users can dynamically create and delete views. + +Libraries can export their own views and claim the view names +by registering them themselves. + +Exporting + +Collected and aggregated data can be exported to a metric collection +backend by registering its exporter. + +Multiple exporters can be registered to upload the data to various +different backends. Users need to unregister the exporters once they +no longer are needed. +*/ +package view // import "go.opencensus.io/stats/view" + +// TODO(acetechnologist): Add a link to the language independent OpenCensus +// spec when it is available. diff --git a/vendor/go.opencensus.io/stats/view/export.go b/vendor/go.opencensus.io/stats/view/export.go new file mode 100644 index 000000000..fac37871b --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/export.go @@ -0,0 +1,53 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package view + +import "sync" + +var ( + exportersMu sync.RWMutex // guards exporters + exporters = make(map[Exporter]struct{}) +) + +// Exporter exports the collected records as view data. +// +// The ExportView method should return quickly; if an +// Exporter takes a significant amount of time to +// process a Data, that work should be done on another goroutine. +// +// The Data should not be modified. +type Exporter interface { + ExportView(viewData *Data) +} + +// RegisterExporter registers an exporter. +// Collected data will be reported via all the +// registered exporters. Once you no longer +// want data to be exported, invoke UnregisterExporter +// with the previously registered exporter. +func RegisterExporter(e Exporter) { + exportersMu.Lock() + defer exportersMu.Unlock() + + exporters[e] = struct{}{} +} + +// UnregisterExporter unregisters an exporter. 
+func UnregisterExporter(e Exporter) { + exportersMu.Lock() + defer exportersMu.Unlock() + + delete(exporters, e) +} diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go new file mode 100644 index 000000000..d229d3e09 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/view.go @@ -0,0 +1,183 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "sync/atomic" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/internal" + "go.opencensus.io/tag" +) + +// View allows users to aggregate the recorded stats.Measurements. +// Views need to be passed to the Subscribe function to be before data will be +// collected and sent to Exporters. +type View struct { + Name string // Name of View. Must be unique. If unset, will default to the name of the Measure. + Description string // Description is a human-readable description for this view. + + // TagKeys are the tag keys describing the grouping of this view. + // A single Row will be produced for each combination of associated tag values. + TagKeys []tag.Key + + // Measure is a stats.Measure to aggregate in this view. + Measure stats.Measure + + // Aggregation is the aggregation function tp apply to the set of Measurements. + Aggregation *Aggregation +} + +// WithName returns a copy of the View with a new name. This is useful for +// renaming views to cope with limitations placed on metric names by various +// backends. +func (v *View) WithName(name string) *View { + vNew := *v + vNew.Name = name + return &vNew +} + +// same compares two views and returns true if they represent the same aggregation. +func (v *View) same(other *View) bool { + if v == other { + return true + } + if v == nil { + return false + } + return reflect.DeepEqual(v.Aggregation, other.Aggregation) && + v.Measure.Name() == other.Measure.Name() +} + +// canonicalized returns a validated View canonicalized by setting explicit +// defaults for Name and Description and sorting the TagKeys +func (v *View) canonicalize() error { + if v.Measure == nil { + return fmt.Errorf("cannot subscribe view %q: measure not set", v.Name) + } + if v.Aggregation == nil { + return fmt.Errorf("cannot subscribe view %q: aggregation not set", v.Name) + } + if v.Name == "" { + v.Name = v.Measure.Name() + } + if v.Description == "" { + v.Description = v.Measure.Description() + } + if err := checkViewName(v.Name); err != nil { + return err + } + sort.Slice(v.TagKeys, func(i, j int) bool { + return v.TagKeys[i].Name() < v.TagKeys[j].Name() + }) + return nil +} + +// viewInternal is the internal representation of a View. +type viewInternal struct { + view *View // view is the canonicalized View definition associated with this view. 
+ subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access + collector *collector +} + +func newViewInternal(v *View) (*viewInternal, error) { + return &viewInternal{ + view: v, + collector: &collector{make(map[string]AggregationData), v.Aggregation}, + }, nil +} + +func (v *viewInternal) subscribe() { + atomic.StoreUint32(&v.subscribed, 1) +} + +func (v *viewInternal) unsubscribe() { + atomic.StoreUint32(&v.subscribed, 0) +} + +// isSubscribed returns true if the view is exporting +// data by subscription. +func (v *viewInternal) isSubscribed() bool { + return atomic.LoadUint32(&v.subscribed) == 1 +} + +func (v *viewInternal) clearRows() { + v.collector.clearRows() +} + +func (v *viewInternal) collectedRows() []*Row { + return v.collector.collectedRows(v.view.TagKeys) +} + +func (v *viewInternal) addSample(m *tag.Map, val float64) { + if !v.isSubscribed() { + return + } + sig := string(encodeWithKeys(m, v.view.TagKeys)) + v.collector.addSample(sig, val) +} + +// A Data is a set of rows about usage of the single measure associated +// with the given view. Each row is specific to a unique set of tags. +type Data struct { + View *View + Start, End time.Time + Rows []*Row +} + +// Row is the collected value for a specific set of key value pairs a.k.a tags. +type Row struct { + Tags []tag.Tag + Data AggregationData +} + +func (r *Row) String() string { + var buffer bytes.Buffer + buffer.WriteString("{ ") + buffer.WriteString("{ ") + for _, t := range r.Tags { + buffer.WriteString(fmt.Sprintf("{%v %v}", t.Key.Name(), t.Value)) + } + buffer.WriteString(" }") + buffer.WriteString(fmt.Sprintf("%v", r.Data)) + buffer.WriteString(" }") + return buffer.String() +} + +// Equal returns true if both rows are equal. Tags are expected to be ordered +// by the key name. Even both rows have the same tags but the tags appear in +// different orders it will return false. +func (r *Row) Equal(other *Row) bool { + if r == other { + return true + } + return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data) +} + +func checkViewName(name string) error { + if len(name) > internal.MaxNameLength { + return fmt.Errorf("view name cannot be larger than %v", internal.MaxNameLength) + } + if !internal.IsPrintable(name) { + return fmt.Errorf("view name needs to be an ASCII string") + } + return nil +} diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go new file mode 100644 index 000000000..2c9b62126 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -0,0 +1,233 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
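A sketch of a custom view exporter (the type is hypothetical): ExportView receives one Data per view at each reporting interval, and each Row carries one of the AggregationData implementations defined above, so a type switch recovers the concrete values.

    package main

    import (
        "log"

        "go.opencensus.io/stats/view"
    )

    // logExporter is a hypothetical exporter that prints every exported row.
    type logExporter struct{}

    func (logExporter) ExportView(d *view.Data) {
        for _, row := range d.Rows {
            switch agg := row.Data.(type) {
            case *view.CountData:
                log.Printf("%s %v count=%d", d.View.Name, row.Tags, agg.Value)
            case *view.SumData:
                log.Printf("%s %v sum=%g", d.View.Name, row.Tags, agg.Value)
            case *view.DistributionData:
                log.Printf("%s %v n=%d mean=%g buckets=%v",
                    d.View.Name, row.Tags, agg.Count, agg.Mean, agg.CountPerBucket)
            case *view.LastValueData:
                log.Printf("%s %v last=%g", d.View.Name, row.Tags, agg.Value)
            }
        }
    }

    func main() {
        view.RegisterExporter(logExporter{})
        defer view.UnregisterExporter(logExporter{})
        // Views registered and measurements recorded elsewhere now reach
        // ExportView once per reporting period.
    }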
+// + +package view + +import ( + "fmt" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/internal" + "go.opencensus.io/tag" +) + +func init() { + defaultWorker = newWorker() + go defaultWorker.start() + internal.DefaultRecorder = record +} + +type measureRef struct { + measure string + views map[*viewInternal]struct{} +} + +type worker struct { + measures map[string]*measureRef + views map[string]*viewInternal + startTimes map[*viewInternal]time.Time + + timer *time.Ticker + c chan command + quit, done chan bool +} + +var defaultWorker *worker + +var defaultReportingDuration = 10 * time.Second + +// Find returns a subscribed view associated with this name. +// If no subscribed view is found, nil is returned. +func Find(name string) (v *View) { + req := &getViewByNameReq{ + name: name, + c: make(chan *getViewByNameResp), + } + defaultWorker.c <- req + resp := <-req.c + return resp.v +} + +// Register begins collecting data for the given views. +// Once a view is subscribed, it reports data to the registered exporters. +func Register(views ...*View) error { + for _, v := range views { + if err := v.canonicalize(); err != nil { + return err + } + } + req := ®isterViewReq{ + views: views, + err: make(chan error), + } + defaultWorker.c <- req + return <-req.err +} + +// Unregister the given views. Data will not longer be exported for these views +// after Unregister returns. +// It is not necessary to unregister from views you expect to collect for the +// duration of your program execution. +func Unregister(views ...*View) { + names := make([]string, len(views)) + for i := range views { + names[i] = views[i].Name + } + req := &unsubscribeFromViewReq{ + views: names, + done: make(chan struct{}), + } + defaultWorker.c <- req + <-req.done +} + +func RetrieveData(viewName string) ([]*Row, error) { + req := &retrieveDataReq{ + now: time.Now(), + v: viewName, + c: make(chan *retrieveDataResp), + } + defaultWorker.c <- req + resp := <-req.c + return resp.rows, resp.err +} + +func record(tags *tag.Map, ms interface{}) { + req := &recordReq{ + tm: tags, + ms: ms.([]stats.Measurement), + } + defaultWorker.c <- req +} + +// SetReportingPeriod sets the interval between reporting aggregated views in +// the program. If duration is less than or +// equal to zero, it enables the default behavior. +func SetReportingPeriod(d time.Duration) { + // TODO(acetechnologist): ensure that the duration d is more than a certain + // value. e.g. 1s + req := &setReportingPeriodReq{ + d: d, + c: make(chan bool), + } + defaultWorker.c <- req + <-req.c // don't return until the timer is set to the new duration. 
+} + +func newWorker() *worker { + return &worker{ + measures: make(map[string]*measureRef), + views: make(map[string]*viewInternal), + startTimes: make(map[*viewInternal]time.Time), + timer: time.NewTicker(defaultReportingDuration), + c: make(chan command, 1024), + quit: make(chan bool), + done: make(chan bool), + } +} + +func (w *worker) start() { + for { + select { + case cmd := <-w.c: + if cmd != nil { + cmd.handleCommand(w) + } + case <-w.timer.C: + w.reportUsage(time.Now()) + case <-w.quit: + w.timer.Stop() + close(w.c) + w.done <- true + return + } + } +} + +func (w *worker) stop() { + w.quit <- true + <-w.done +} + +func (w *worker) getMeasureRef(name string) *measureRef { + if mr, ok := w.measures[name]; ok { + return mr + } + mr := &measureRef{ + measure: name, + views: make(map[*viewInternal]struct{}), + } + w.measures[name] = mr + return mr +} + +func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { + vi, err := newViewInternal(v) + if err != nil { + return nil, err + } + if x, ok := w.views[vi.view.Name]; ok { + if !x.view.same(vi.view) { + return nil, fmt.Errorf("cannot subscribe view %q; a different view with the same name is already subscribed", v.Name) + } + + // the view is already registered so there is nothing to do and the + // command is considered successful. + return x, nil + } + w.views[vi.view.Name] = vi + ref := w.getMeasureRef(vi.view.Measure.Name()) + ref.views[vi] = struct{}{} + return vi, nil +} + +func (w *worker) reportUsage(now time.Time) { + for _, v := range w.views { + if !v.isSubscribed() { + continue + } + rows := v.collectedRows() + _, ok := w.startTimes[v] + if !ok { + w.startTimes[v] = now + } + // Make sure collector is never going + // to mutate the exported data. + rows = deepCopyRowData(rows) + viewData := &Data{ + View: v.view, + Start: w.startTimes[v], + End: time.Now(), + Rows: rows, + } + exportersMu.Lock() + for e := range exporters { + e.ExportView(viewData) + } + exportersMu.Unlock() + } +} + +func deepCopyRowData(rows []*Row) []*Row { + newRows := make([]*Row, 0, len(rows)) + for _, r := range rows { + newRows = append(newRows, &Row{ + Data: r.Data.clone(), + Tags: r.Tags, + }) + } + return newRows +} diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go new file mode 100644 index 000000000..0d244ac7e --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/worker_commands.go @@ -0,0 +1,170 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "errors" + "fmt" + "strings" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/internal" + "go.opencensus.io/tag" +) + +type command interface { + handleCommand(w *worker) +} + +// getViewByNameReq is the command to get a view given its name. 
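Putting the worker API together (hypothetical names throughout): Register starts collection, SetReportingPeriod tunes how often registered exporters receive data (a non-positive duration restores the 10s default), and RetrieveData pulls rows on demand.

    package main

    import (
        "log"
        "time"

        "go.opencensus.io/stats"
        "go.opencensus.io/stats/view"
    )

    var reqLatencyMs = stats.Float64("example.com/measures/req_latency", "request latency", stats.UnitMilliseconds)

    func main() {
        v := &view.View{
            Name:        "example.com/views/req_latency",
            Measure:     reqLatencyMs,
            Aggregation: view.Distribution(25, 100, 500),
        }
        if err := view.Register(v); err != nil {
            log.Fatal(err)
        }
        // Report every 5s instead of the 10s default; a non-positive
        // duration restores the default period.
        view.SetReportingPeriod(5 * time.Second)

        // ... stats.Record calls happen elsewhere ...

        // Rows can also be pulled on demand, independently of exporters.
        rows, err := view.RetrieveData(v.Name)
        if err != nil {
            log.Fatal(err)
        }
        for _, r := range rows {
            log.Println(r)
        }
    }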
+type getViewByNameReq struct { + name string + c chan *getViewByNameResp +} + +type getViewByNameResp struct { + v *View +} + +func (cmd *getViewByNameReq) handleCommand(w *worker) { + v := w.views[cmd.name] + if v == nil { + cmd.c <- &getViewByNameResp{nil} + return + } + cmd.c <- &getViewByNameResp{v.view} +} + +// registerViewReq is the command to register a view. +type registerViewReq struct { + views []*View + err chan error +} + +func (cmd *registerViewReq) handleCommand(w *worker) { + var errstr []string + for _, view := range cmd.views { + vi, err := w.tryRegisterView(view) + if err != nil { + errstr = append(errstr, fmt.Sprintf("%s: %v", view.Name, err)) + continue + } + internal.SubscriptionReporter(view.Measure.Name()) + vi.subscribe() + } + if len(errstr) > 0 { + cmd.err <- errors.New(strings.Join(errstr, "\n")) + } else { + cmd.err <- nil + } +} + +// unsubscribeFromViewReq is the command to unsubscribe to a view. Has no +// impact on the data collection for client that are pulling data from the +// library. +type unsubscribeFromViewReq struct { + views []string + done chan struct{} +} + +func (cmd *unsubscribeFromViewReq) handleCommand(w *worker) { + for _, name := range cmd.views { + vi, ok := w.views[name] + if !ok { + continue + } + + vi.unsubscribe() + if !vi.isSubscribed() { + // this was the last subscription and view is not collecting anymore. + // The collected data can be cleared. + vi.clearRows() + } + } + cmd.done <- struct{}{} +} + +// retrieveDataReq is the command to retrieve data for a view. +type retrieveDataReq struct { + now time.Time + v string + c chan *retrieveDataResp +} + +type retrieveDataResp struct { + rows []*Row + err error +} + +func (cmd *retrieveDataReq) handleCommand(w *worker) { + vi, ok := w.views[cmd.v] + if !ok { + cmd.c <- &retrieveDataResp{ + nil, + fmt.Errorf("cannot retrieve data; view %q is not registered", cmd.v), + } + return + } + + if !vi.isSubscribed() { + cmd.c <- &retrieveDataResp{ + nil, + fmt.Errorf("cannot retrieve data; view %q has no subscriptions or collection is not forcibly started", cmd.v), + } + return + } + cmd.c <- &retrieveDataResp{ + vi.collectedRows(), + nil, + } +} + +// recordReq is the command to record data related to multiple measures +// at once. +type recordReq struct { + tm *tag.Map + ms []stats.Measurement +} + +func (cmd *recordReq) handleCommand(w *worker) { + for _, m := range cmd.ms { + if (m == stats.Measurement{}) { // not subscribed + continue + } + ref := w.getMeasureRef(m.Measure().Name()) + for v := range ref.views { + v.addSample(cmd.tm, m.Value()) + } + } +} + +// setReportingPeriodReq is the command to modify the duration between +// reporting the collected data to the subscribed clients. +type setReportingPeriodReq struct { + d time.Duration + c chan bool +} + +func (cmd *setReportingPeriodReq) handleCommand(w *worker) { + w.timer.Stop() + if cmd.d <= 0 { + w.timer = time.NewTicker(defaultReportingDuration) + } else { + w.timer = time.NewTicker(cmd.d) + } + cmd.c <- true +} diff --git a/vendor/go.opencensus.io/tag/context.go b/vendor/go.opencensus.io/tag/context.go new file mode 100644 index 000000000..ed528bcb3 --- /dev/null +++ b/vendor/go.opencensus.io/tag/context.go @@ -0,0 +1,41 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +import "context" + +// FromContext returns the tag map stored in the context. +func FromContext(ctx context.Context) *Map { + // The returned tag map shouldn't be mutated. + ts := ctx.Value(mapCtxKey) + if ts == nil { + return nil + } + return ts.(*Map) +} + +// NewContext creates a new context with the given tag map. +// To propagate a tag map to downstream methods and downstream RPCs, add a tag map +// to the current context. NewContext will return a copy of the current context, +// and put the tag map into the returned one. +// If there is already a tag map in the current context, it will be replaced with m. +func NewContext(ctx context.Context, m *Map) context.Context { + return context.WithValue(ctx, mapCtxKey, m) +} + +type ctxKey struct{} + +var mapCtxKey = ctxKey{} diff --git a/vendor/go.opencensus.io/tag/doc.go b/vendor/go.opencensus.io/tag/doc.go new file mode 100644 index 000000000..da16b74e4 --- /dev/null +++ b/vendor/go.opencensus.io/tag/doc.go @@ -0,0 +1,26 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +/* +Package tag contains OpenCensus tags. + +Tags are key-value pairs. Tags provide additional cardinality to +the OpenCensus instrumentation data. + +Tags can be propagated on the wire and in the same +process via context.Context. Encode and Decode should be +used to represent tags into their binary propagation form. +*/ +package tag // import "go.opencensus.io/tag" diff --git a/vendor/go.opencensus.io/tag/key.go b/vendor/go.opencensus.io/tag/key.go new file mode 100644 index 000000000..ebbed9500 --- /dev/null +++ b/vendor/go.opencensus.io/tag/key.go @@ -0,0 +1,35 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +// Key represents a tag key. +type Key struct { + name string +} + +// NewKey creates or retrieves a string key identified by name. +// Calling NewKey consequently with the same name returns the same key. 
+func NewKey(name string) (Key, error) { + if !checkKeyName(name) { + return Key{}, errInvalidKeyName + } + return Key{name: name}, nil +} + +// Name returns the name of the key. +func (k Key) Name() string { + return k.name +} diff --git a/vendor/go.opencensus.io/tag/map.go b/vendor/go.opencensus.io/tag/map.go new file mode 100644 index 000000000..f6237b313 --- /dev/null +++ b/vendor/go.opencensus.io/tag/map.go @@ -0,0 +1,197 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +import ( + "bytes" + "context" + "fmt" + "sort" +) + +// Tag is a key value pair that can be propagated on wire. +type Tag struct { + Key Key + Value string +} + +// Map is a map of tags. Use NewMap to build tag maps. +type Map struct { + m map[Key]string +} + +// Value returns the value for the key if a value +// for the key exists. +func (m *Map) Value(k Key) (string, bool) { + if m == nil { + return "", false + } + v, ok := m.m[k] + return v, ok +} + +func (m *Map) String() string { + if m == nil { + return "nil" + } + var keys []Key + for k := range m.m { + keys = append(keys, k) + } + sort.Slice(keys, func(i, j int) bool { return keys[i].Name() < keys[j].Name() }) + + var buffer bytes.Buffer + buffer.WriteString("{ ") + for _, k := range keys { + buffer.WriteString(fmt.Sprintf("{%v %v}", k.name, m.m[k])) + } + buffer.WriteString(" }") + return buffer.String() +} + +func (m *Map) insert(k Key, v string) { + if _, ok := m.m[k]; ok { + return + } + m.m[k] = v +} + +func (m *Map) update(k Key, v string) { + if _, ok := m.m[k]; ok { + m.m[k] = v + } +} + +func (m *Map) upsert(k Key, v string) { + m.m[k] = v +} + +func (m *Map) delete(k Key) { + delete(m.m, k) +} + +func newMap(sizeHint int) *Map { + return &Map{m: make(map[Key]string, sizeHint)} +} + +// Mutator modifies a tag map. +type Mutator interface { + Mutate(t *Map) (*Map, error) +} + +// Insert returns a mutator that inserts a +// value associated with k. If k already exists in the tag map, +// mutator doesn't update the value. +func Insert(k Key, v string) Mutator { + return &mutator{ + fn: func(m *Map) (*Map, error) { + if !checkValue(v) { + return nil, errInvalidValue + } + m.insert(k, v) + return m, nil + }, + } +} + +// Update returns a mutator that updates the +// value of the tag associated with k with v. If k doesn't +// exists in the tag map, the mutator doesn't insert the value. +func Update(k Key, v string) Mutator { + return &mutator{ + fn: func(m *Map) (*Map, error) { + if !checkValue(v) { + return nil, errInvalidValue + } + m.update(k, v) + return m, nil + }, + } +} + +// Upsert returns a mutator that upserts the +// value of the tag associated with k with v. It inserts the +// value if k doesn't exist already. It mutates the value +// if k already exists. 
+func Upsert(k Key, v string) Mutator { + return &mutator{ + fn: func(m *Map) (*Map, error) { + if !checkValue(v) { + return nil, errInvalidValue + } + m.upsert(k, v) + return m, nil + }, + } +} + +// Delete returns a mutator that deletes +// the value associated with k. +func Delete(k Key) Mutator { + return &mutator{ + fn: func(m *Map) (*Map, error) { + m.delete(k) + return m, nil + }, + } +} + +// New returns a new context that contains a tag map +// originated from the incoming context and modified +// with the provided mutators. +func New(ctx context.Context, mutator ...Mutator) (context.Context, error) { + m := newMap(0) + orig := FromContext(ctx) + if orig != nil { + for k, v := range orig.m { + if !checkKeyName(k.Name()) { + return ctx, fmt.Errorf("key:%q: %v", k, errInvalidKeyName) + } + if !checkValue(v) { + return ctx, fmt.Errorf("key:%q value:%q: %v", k.Name(), v, errInvalidValue) + } + m.insert(k, v) + } + } + var err error + for _, mod := range mutator { + m, err = mod.Mutate(m) + if err != nil { + return ctx, err + } + } + return NewContext(ctx, m), nil +} + +// Do is similar to pprof.Do: a convenience for installing the tags +// from the context as Go profiler labels. This allows you to +// correlated runtime profiling with stats. +// +// It converts the key/values from the given map to Go profiler labels +// and calls pprof.Do. +// +// Do is going to do nothing if your Go version is below 1.9. +func Do(ctx context.Context, f func(ctx context.Context)) { + do(ctx, f) +} + +type mutator struct { + fn func(t *Map) (*Map, error) +} + +func (m *mutator) Mutate(t *Map) (*Map, error) { + return m.fn(t) +} diff --git a/vendor/go.opencensus.io/tag/map_codec.go b/vendor/go.opencensus.io/tag/map_codec.go new file mode 100644 index 000000000..38f545918 --- /dev/null +++ b/vendor/go.opencensus.io/tag/map_codec.go @@ -0,0 +1,221 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +import ( + "encoding/binary" + "fmt" +) + +// KeyType defines the types of keys allowed. Currently only keyTypeString is +// supported. +type keyType byte + +const ( + keyTypeString keyType = iota + keyTypeInt64 + keyTypeTrue + keyTypeFalse + + tagsVersionID = byte(0) +) + +type encoderGRPC struct { + buf []byte + writeIdx, readIdx int +} + +// writeKeyString writes the fieldID '0' followed by the key string and value +// string. 
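A sketch of building and reading a tag map (the key names and values are illustrative). tag.New applies the mutators in order to a copy of the map from the incoming context; keys and values must be printable ASCII of at most 255 characters, per validate.go further below.

    package main

    import (
        "context"
        "log"

        "go.opencensus.io/tag"
    )

    func main() {
        keyMethod, err := tag.NewKey("example.com/keys/method")
        if err != nil {
            log.Fatal(err) // rejected unless printable ASCII, 1-255 characters
        }
        keyStatus, _ := tag.NewKey("example.com/keys/status")

        ctx, err := tag.New(context.Background(),
            tag.Insert(keyMethod, "GET"), // kept only if the key is not already set
            tag.Upsert(keyStatus, "OK"),  // inserted or overwritten unconditionally
        )
        if err != nil {
            log.Fatal(err)
        }

        // Downstream code reads the propagated tags back out of the context.
        if v, ok := tag.FromContext(ctx).Value(keyMethod); ok {
            log.Printf("method=%s", v)
        }
    }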
+func (eg *encoderGRPC) writeTagString(k, v string) { + eg.writeByte(byte(keyTypeString)) + eg.writeStringWithVarintLen(k) + eg.writeStringWithVarintLen(v) +} + +func (eg *encoderGRPC) writeTagUint64(k string, i uint64) { + eg.writeByte(byte(keyTypeInt64)) + eg.writeStringWithVarintLen(k) + eg.writeUint64(i) +} + +func (eg *encoderGRPC) writeTagTrue(k string) { + eg.writeByte(byte(keyTypeTrue)) + eg.writeStringWithVarintLen(k) +} + +func (eg *encoderGRPC) writeTagFalse(k string) { + eg.writeByte(byte(keyTypeFalse)) + eg.writeStringWithVarintLen(k) +} + +func (eg *encoderGRPC) writeBytesWithVarintLen(bytes []byte) { + length := len(bytes) + + eg.growIfRequired(binary.MaxVarintLen64 + length) + eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) + copy(eg.buf[eg.writeIdx:], bytes) + eg.writeIdx += length +} + +func (eg *encoderGRPC) writeStringWithVarintLen(s string) { + length := len(s) + + eg.growIfRequired(binary.MaxVarintLen64 + length) + eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) + copy(eg.buf[eg.writeIdx:], s) + eg.writeIdx += length +} + +func (eg *encoderGRPC) writeByte(v byte) { + eg.growIfRequired(1) + eg.buf[eg.writeIdx] = v + eg.writeIdx++ +} + +func (eg *encoderGRPC) writeUint32(i uint32) { + eg.growIfRequired(4) + binary.LittleEndian.PutUint32(eg.buf[eg.writeIdx:], i) + eg.writeIdx += 4 +} + +func (eg *encoderGRPC) writeUint64(i uint64) { + eg.growIfRequired(8) + binary.LittleEndian.PutUint64(eg.buf[eg.writeIdx:], i) + eg.writeIdx += 8 +} + +func (eg *encoderGRPC) readByte() byte { + b := eg.buf[eg.readIdx] + eg.readIdx++ + return b +} + +func (eg *encoderGRPC) readUint32() uint32 { + i := binary.LittleEndian.Uint32(eg.buf[eg.readIdx:]) + eg.readIdx += 4 + return i +} + +func (eg *encoderGRPC) readUint64() uint64 { + i := binary.LittleEndian.Uint64(eg.buf[eg.readIdx:]) + eg.readIdx += 8 + return i +} + +func (eg *encoderGRPC) readBytesWithVarintLen() ([]byte, error) { + if eg.readEnded() { + return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) + } + length, valueStart := binary.Uvarint(eg.buf[eg.readIdx:]) + if valueStart <= 0 { + return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) + } + + valueStart += eg.readIdx + valueEnd := valueStart + int(length) + if valueEnd > len(eg.buf) { + return nil, fmt.Errorf("malformed encoding: length:%v, upper:%v, maxLength:%v", length, valueEnd, len(eg.buf)) + } + + eg.readIdx = valueEnd + return eg.buf[valueStart:valueEnd], nil +} + +func (eg *encoderGRPC) readStringWithVarintLen() (string, error) { + bytes, err := eg.readBytesWithVarintLen() + if err != nil { + return "", err + } + return string(bytes), nil +} + +func (eg *encoderGRPC) growIfRequired(expected int) { + if len(eg.buf)-eg.writeIdx < expected { + tmp := make([]byte, 2*(len(eg.buf)+1)+expected) + copy(tmp, eg.buf) + eg.buf = tmp + } +} + +func (eg *encoderGRPC) readEnded() bool { + return eg.readIdx >= len(eg.buf) +} + +func (eg *encoderGRPC) bytes() []byte { + return eg.buf[:eg.writeIdx] +} + +// Encode encodes the tag map into a []byte. It is useful to propagate +// the tag maps on wire in binary format. 
+func Encode(m *Map) []byte { + eg := &encoderGRPC{ + buf: make([]byte, len(m.m)), + } + eg.writeByte(byte(tagsVersionID)) + for k, v := range m.m { + eg.writeByte(byte(keyTypeString)) + eg.writeStringWithVarintLen(k.name) + eg.writeBytesWithVarintLen([]byte(v)) + } + return eg.bytes() +} + +// Decode decodes the given []byte into a tag map. +func Decode(bytes []byte) (*Map, error) { + ts := newMap(0) + + eg := &encoderGRPC{ + buf: bytes, + } + if len(eg.buf) == 0 { + return ts, nil + } + + version := eg.readByte() + if version > tagsVersionID { + return nil, fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID) + } + + for !eg.readEnded() { + typ := keyType(eg.readByte()) + + if typ != keyTypeString { + return nil, fmt.Errorf("cannot decode: invalid key type: %q", typ) + } + + k, err := eg.readBytesWithVarintLen() + if err != nil { + return nil, err + } + + v, err := eg.readBytesWithVarintLen() + if err != nil { + return nil, err + } + + key, err := NewKey(string(k)) + if err != nil { + return nil, err // no partial failures + } + val := string(v) + if !checkValue(val) { + return nil, errInvalidValue // no partial failures + } + ts.upsert(key, val) + } + return ts, nil +} diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go new file mode 100644 index 000000000..f81cd0b4a --- /dev/null +++ b/vendor/go.opencensus.io/tag/profile_19.go @@ -0,0 +1,31 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.9 + +package tag + +import ( + "context" + "runtime/pprof" +) + +func do(ctx context.Context, f func(ctx context.Context)) { + m := FromContext(ctx) + keyvals := make([]string, 0, 2*len(m.m)) + for k, v := range m.m { + keyvals = append(keyvals, k.Name(), v) + } + pprof.Do(ctx, pprof.Labels(keyvals...), f) +} diff --git a/vendor/go.opencensus.io/tag/profile_not19.go b/vendor/go.opencensus.io/tag/profile_not19.go new file mode 100644 index 000000000..83adbce56 --- /dev/null +++ b/vendor/go.opencensus.io/tag/profile_not19.go @@ -0,0 +1,23 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
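Encode and Decode give the binary propagation round trip (the key name and value below are illustrative); Decode rejects unknown versions and non-string key types, and any invalid key or value fails the whole decode rather than producing a partial map.

    package main

    import (
        "context"
        "log"

        "go.opencensus.io/tag"
    )

    func main() {
        k, _ := tag.NewKey("example.com/keys/user")
        ctx, _ := tag.New(context.Background(), tag.Upsert(k, "alice"))

        // Sender side: serialize the tag map, e.g. to ship in RPC metadata.
        wire := tag.Encode(tag.FromContext(ctx))

        // Receiver side: rebuild the map from the wire bytes.
        m, err := tag.Decode(wire)
        if err != nil {
            log.Fatal(err)
        }
        if v, ok := m.Value(k); ok {
            log.Printf("user=%s", v)
        }
    }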
+ +// +build !go1.9 + +package tag + +import "context" + +func do(ctx context.Context, f func(ctx context.Context)) { + f(ctx) +} diff --git a/vendor/go.opencensus.io/tag/validate.go b/vendor/go.opencensus.io/tag/validate.go new file mode 100644 index 000000000..0939fc674 --- /dev/null +++ b/vendor/go.opencensus.io/tag/validate.go @@ -0,0 +1,56 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tag + +import "errors" + +const ( + maxKeyLength = 255 + + // valid are restricted to US-ASCII subset (range 0x20 (' ') to 0x7e ('~')). + validKeyValueMin = 32 + validKeyValueMax = 126 +) + +var ( + errInvalidKeyName = errors.New("invalid key name: only ASCII characters accepted; max length must be 255 characters") + errInvalidValue = errors.New("invalid value: only ASCII characters accepted; max length must be 255 characters") +) + +func checkKeyName(name string) bool { + if len(name) == 0 { + return false + } + if len(name) > maxKeyLength { + return false + } + return isASCII(name) +} + +func isASCII(s string) bool { + for _, c := range s { + if (c < validKeyValueMin) || (c > validKeyValueMax) { + return false + } + } + return true +} + +func checkValue(v string) bool { + if len(v) > maxKeyLength { + return false + } + return isASCII(v) +} diff --git a/vendor/go.opencensus.io/trace/basetypes.go b/vendor/go.opencensus.io/trace/basetypes.go new file mode 100644 index 000000000..01f0f9083 --- /dev/null +++ b/vendor/go.opencensus.io/trace/basetypes.go @@ -0,0 +1,114 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "fmt" + "time" +) + +type ( + // TraceID is a 16-byte identifier for a set of spans. + TraceID [16]byte + + // SpanID is an 8-byte identifier for a single span. + SpanID [8]byte +) + +func (t TraceID) String() string { + return fmt.Sprintf("%02x", t[:]) +} + +func (s SpanID) String() string { + return fmt.Sprintf("%02x", s[:]) +} + +// Annotation represents a text annotation with a set of attributes and a timestamp. +type Annotation struct { + Time time.Time + Message string + Attributes map[string]interface{} +} + +// Attribute represents a key-value pair on a span, link or annotation. +// Construct with one of: BoolAttribute, Int64Attribute, or StringAttribute. +type Attribute struct { + key string + value interface{} +} + +// BoolAttribute returns a bool-valued attribute. 
+func BoolAttribute(key string, value bool) Attribute { + return Attribute{key: key, value: value} +} + +// Int64Attribute returns an int64-valued attribute. +func Int64Attribute(key string, value int64) Attribute { + return Attribute{key: key, value: value} +} + +// StringAttribute returns a string-valued attribute. +func StringAttribute(key string, value string) Attribute { + return Attribute{key: key, value: value} +} + +// LinkType specifies the relationship between the span that had the link +// added, and the linked span. +type LinkType int32 + +// LinkType values. +const ( + LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown. + LinkTypeChild // The current span is a child of the linked span. + LinkTypeParent // The current span is the parent of the linked span. +) + +// Link represents a reference from one span to another span. +type Link struct { + TraceID TraceID + SpanID SpanID + Type LinkType + // Attributes is a set of attributes on the link. + Attributes map[string]interface{} +} + +// MessageEventType specifies the type of message event. +type MessageEventType int32 + +// MessageEventType values. +const ( + MessageEventTypeUnspecified MessageEventType = iota // Unknown event type. + MessageEventTypeSent // Indicates a sent RPC message. + MessageEventTypeRecv // Indicates a received RPC message. +) + +// MessageEvent represents an event describing a message sent or received on the network. +type MessageEvent struct { + Time time.Time + EventType MessageEventType + MessageID int64 + UncompressedByteSize int64 + CompressedByteSize int64 +} + +// Status is the status of a Span. +type Status struct { + // Code is a status code. Zero indicates success. + // + // If Code will be propagated to Google APIs, it ideally should be a value from + // https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto . + Code int32 + Message string +} diff --git a/vendor/go.opencensus.io/trace/config.go b/vendor/go.opencensus.io/trace/config.go new file mode 100644 index 000000000..f3df9be45 --- /dev/null +++ b/vendor/go.opencensus.io/trace/config.go @@ -0,0 +1,40 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import "go.opencensus.io/trace/internal" + +// Config represents the global tracing configuration. +type Config struct { + // DefaultSampler is the default sampler used when creating new spans. + DefaultSampler Sampler + + // IDGenerator is for internal use only. + IDGenerator internal.IDGenerator +} + +// ApplyConfig applies changes to the global tracing configuration. +// +// Fields not provided in the given config are going to be preserved. 
+func ApplyConfig(cfg Config) { + c := config.Load().(*Config) + if cfg.DefaultSampler != nil { + c.DefaultSampler = cfg.DefaultSampler + } + if cfg.IDGenerator != nil { + c.IDGenerator = cfg.IDGenerator + } + config.Store(c) +} diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go new file mode 100644 index 000000000..2501271ef --- /dev/null +++ b/vendor/go.opencensus.io/trace/doc.go @@ -0,0 +1,55 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package trace contains types for representing trace information, and +functions for global configuration of tracing. + +The following assumes a basic familiarity with OpenCensus concepts. +See http://opencensus.io + + +Enabling Tracing for a Program + +To use OpenCensus tracing, register at least one Exporter. You can use +one of the provided exporters or write your own. + + trace.RegisterExporter(anExporter) + +By default, traces will be sampled relatively rarely. To change the sampling +frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler +to sample a subset of traces, or use AlwaysSample to collect a trace on every run: + + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + + +Adding Spans to a Trace + +A trace consists of a tree of spans. In Go, the current span is carried in a +context.Context. + +It is common to want to capture all the activity of a function call in a span. For +this to work, the function must take a context.Context as a parameter. Add these two +lines to the top of the function: + + ctx, span := trace.StartSpan(ctx, "your choice of name") + defer span.End() + +StartSpan will create a new top-level span if the context +doesn't contain another span, otherwise it will create a child span. + +As a suggestion, use the fully-qualified function name as the span name, e.g. +"github.com/me/mypackage.Run". +*/ +package trace // import "go.opencensus.io/trace" diff --git a/vendor/go.opencensus.io/trace/export.go b/vendor/go.opencensus.io/trace/export.go new file mode 100644 index 000000000..086612c9a --- /dev/null +++ b/vendor/go.opencensus.io/trace/export.go @@ -0,0 +1,74 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "sync" + "time" +) + +// Exporter is a type for functions that receive sampled trace spans. 
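A sketch of turning tracing on and annotating spans. ApplyConfig only overwrites the fields that are set; StartSpan is the entry point described in doc.go, while AddAttributes and SetStatus are Span methods defined elsewhere in this package (not in this hunk), so their use here is an assumption. The span names and attribute keys are illustrative.

    package main

    import (
        "context"

        "go.opencensus.io/trace"
    )

    func main() {
        // Sample every trace while developing; fields left unset in Config
        // keep their previous values.
        trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})

        ctx, span := trace.StartSpan(context.Background(), "example.com/mypackage.Run")
        defer span.End()

        // AddAttributes and SetStatus are Span methods defined elsewhere in
        // this package; the attribute keys here are illustrative.
        span.AddAttributes(
            trace.StringAttribute("example.com/keys/user", "alice"),
            trace.Int64Attribute("example.com/keys/items", 3),
        )
        span.SetStatus(trace.Status{Code: 0, Message: "OK"})

        doWork(ctx)
    }

    func doWork(ctx context.Context) {
        _, child := trace.StartSpan(ctx, "example.com/mypackage.doWork")
        defer child.End()
    }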
+// +// The ExportSpan method should be safe for concurrent use and should return +// quickly; if an Exporter takes a significant amount of time to process a +// SpanData, that work should be done on another goroutine. +// +// The SpanData should not be modified, but a pointer to it can be kept. +type Exporter interface { + ExportSpan(s *SpanData) +} + +var ( + exportersMu sync.Mutex + exporters map[Exporter]struct{} +) + +// RegisterExporter adds to the list of Exporters that will receive sampled +// trace spans. +func RegisterExporter(e Exporter) { + exportersMu.Lock() + if exporters == nil { + exporters = make(map[Exporter]struct{}) + } + exporters[e] = struct{}{} + exportersMu.Unlock() +} + +// UnregisterExporter removes from the list of Exporters the Exporter that was +// registered with the given name. +func UnregisterExporter(e Exporter) { + exportersMu.Lock() + delete(exporters, e) + exportersMu.Unlock() +} + +// SpanData contains all the information collected by a Span. +type SpanData struct { + SpanContext + ParentSpanID SpanID + SpanKind int + Name string + StartTime time.Time + // The wall clock time of EndTime will be adjusted to always be offset + // from StartTime by the duration of the span. + EndTime time.Time + // The values of Attributes each have type string, bool, or int64. + Attributes map[string]interface{} + Annotations []Annotation + MessageEvents []MessageEvent + Status + Links []Link + HasRemoteParent bool +} diff --git a/vendor/go.opencensus.io/trace/internal/internal.go b/vendor/go.opencensus.io/trace/internal/internal.go new file mode 100644 index 000000000..1c8b9b34b --- /dev/null +++ b/vendor/go.opencensus.io/trace/internal/internal.go @@ -0,0 +1,21 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal provides trace internals. +package internal + +type IDGenerator interface { + NewTraceID() [16]byte + NewSpanID() [8]byte +} diff --git a/vendor/go.opencensus.io/trace/propagation/propagation.go b/vendor/go.opencensus.io/trace/propagation/propagation.go new file mode 100644 index 000000000..a3b00f474 --- /dev/null +++ b/vendor/go.opencensus.io/trace/propagation/propagation.go @@ -0,0 +1,108 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package propagation implements the binary trace context format. +package propagation + +// TODO: link to external spec document. 
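A sketch of a span exporter (the type is hypothetical). ExportSpan receives each sampled span once it ends; it should return quickly and must not modify the SpanData it is handed.

    package main

    import (
        "log"

        "go.opencensus.io/trace"
    )

    // printExporter is a hypothetical exporter that logs each sampled span.
    type printExporter struct{}

    func (printExporter) ExportSpan(sd *trace.SpanData) {
        // The SpanData must not be modified; copy anything that needs changing.
        log.Printf("span %s trace=%s id=%s took %v status=%d",
            sd.Name, sd.TraceID, sd.SpanID, sd.EndTime.Sub(sd.StartTime), sd.Status.Code)
    }

    func main() {
        trace.RegisterExporter(printExporter{})
        defer trace.UnregisterExporter(printExporter{})
        // Spans started after this point reach ExportSpan when they end,
        // provided they were sampled.
    }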
+ +// BinaryFormat format: +// +// Binary value: +// version_id: 1 byte representing the version id. +// +// For version_id = 0: +// +// version_format: +// field_format: +// +// Fields: +// +// TraceId: (field_id = 0, len = 16, default = "0000000000000000") - 16-byte array representing the trace_id. +// SpanId: (field_id = 1, len = 8, default = "00000000") - 8-byte array representing the span_id. +// TraceOptions: (field_id = 2, len = 1, default = "0") - 1-byte array representing the trace_options. +// +// Fields MUST be encoded using the field id order (smaller to higher). +// +// Valid value example: +// +// {0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97, +// 98, 99, 100, 101, 102, 103, 104, 2, 1} +// +// version_id = 0; +// trace_id = {64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79} +// span_id = {97, 98, 99, 100, 101, 102, 103, 104}; +// trace_options = {1}; + +import ( + "net/http" + + "go.opencensus.io/trace" +) + +// Binary returns the binary format representation of a SpanContext. +// +// If sc is the zero value, Binary returns nil. +func Binary(sc trace.SpanContext) []byte { + if sc == (trace.SpanContext{}) { + return nil + } + var b [29]byte + copy(b[2:18], sc.TraceID[:]) + b[18] = 1 + copy(b[19:27], sc.SpanID[:]) + b[27] = 2 + b[28] = uint8(sc.TraceOptions) + return b[:] +} + +// FromBinary returns the SpanContext represented by b. +// +// If b has an unsupported version ID or contains no TraceID, FromBinary +// returns with ok==false. +func FromBinary(b []byte) (sc trace.SpanContext, ok bool) { + if len(b) == 0 || b[0] != 0 { + return trace.SpanContext{}, false + } + b = b[1:] + if len(b) >= 17 && b[0] == 0 { + copy(sc.TraceID[:], b[1:17]) + b = b[17:] + } else { + return trace.SpanContext{}, false + } + if len(b) >= 9 && b[0] == 1 { + copy(sc.SpanID[:], b[1:9]) + b = b[9:] + } + if len(b) >= 2 && b[0] == 2 { + sc.TraceOptions = trace.TraceOptions(b[1]) + } + return sc, true +} + +// HTTPFormat implementations propagate span contexts +// in HTTP requests. +// +// SpanContextFromRequest extracts a span context from incoming +// requests. +// +// SpanContextToRequest modifies the given request to include the given +// span context. +type HTTPFormat interface { + SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) + SpanContextToRequest(sc trace.SpanContext, req *http.Request) +} + +// TODO(jbd): Find a more representative but short name for HTTPFormat. diff --git a/vendor/go.opencensus.io/trace/sampling.go b/vendor/go.opencensus.io/trace/sampling.go new file mode 100644 index 000000000..313f8b68e --- /dev/null +++ b/vendor/go.opencensus.io/trace/sampling.go @@ -0,0 +1,76 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
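A round trip through the binary format, reusing the byte values from the "valid value example" in the comment above; SpanContext and its TraceID/SpanID/TraceOptions fields are defined in the trace package outside this hunk.

    package main

    import (
        "fmt"

        "go.opencensus.io/trace"
        "go.opencensus.io/trace/propagation"
    )

    func main() {
        // The same values as the "valid value example" in the format comment.
        sc := trace.SpanContext{
            TraceID:      trace.TraceID{64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79},
            SpanID:       trace.SpanID{97, 98, 99, 100, 101, 102, 103, 104},
            TraceOptions: 1,
        }

        wire := propagation.Binary(sc) // 29 bytes: version, trace id, span id, options
        parent, ok := propagation.FromBinary(wire)
        fmt.Println(ok, parent.TraceID, parent.SpanID, parent.TraceOptions&1 == 1)
    }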
+ +package trace + +import ( + "encoding/binary" +) + +const defaultSamplingProbability = 1e-4 + +func newDefaultSampler() Sampler { + return ProbabilitySampler(defaultSamplingProbability) +} + +// Sampler decides whether a trace should be sampled and exported. +type Sampler func(SamplingParameters) SamplingDecision + +// SamplingParameters contains the values passed to a Sampler. +type SamplingParameters struct { + ParentContext SpanContext + TraceID TraceID + SpanID SpanID + Name string + HasRemoteParent bool +} + +// SamplingDecision is the value returned by a Sampler. +type SamplingDecision struct { + Sample bool +} + +// ProbabilitySampler returns a Sampler that samples a given fraction of traces. +// +// It also samples spans whose parents are sampled. +func ProbabilitySampler(fraction float64) Sampler { + if !(fraction >= 0) { + fraction = 0 + } else if fraction >= 1 { + return AlwaysSample() + } + + traceIDUpperBound := uint64(fraction * (1 << 63)) + return Sampler(func(p SamplingParameters) SamplingDecision { + if p.ParentContext.IsSampled() { + return SamplingDecision{Sample: true} + } + x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1 + return SamplingDecision{Sample: x < traceIDUpperBound} + }) +} + +// AlwaysSample returns a Sampler that samples every trace. +func AlwaysSample() Sampler { + return func(p SamplingParameters) SamplingDecision { + return SamplingDecision{Sample: true} + } +} + +// NeverSample returns a Sampler that samples no traces. +func NeverSample() Sampler { + return func(p SamplingParameters) SamplingDecision { + return SamplingDecision{Sample: false} + } +} diff --git a/vendor/go.opencensus.io/trace/spanbucket.go b/vendor/go.opencensus.io/trace/spanbucket.go new file mode 100644 index 000000000..fbabad34c --- /dev/null +++ b/vendor/go.opencensus.io/trace/spanbucket.go @@ -0,0 +1,130 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "time" +) + +// samplePeriod is the minimum time between accepting spans in a single bucket. +const samplePeriod = time.Second + +// defaultLatencies contains the default latency bucket bounds. +// TODO: consider defaults, make configurable +var defaultLatencies = [...]time.Duration{ + 10 * time.Microsecond, + 100 * time.Microsecond, + time.Millisecond, + 10 * time.Millisecond, + 100 * time.Millisecond, + time.Second, + 10 * time.Second, + time.Minute, +} + +// bucket is a container for a set of spans for a particular error code or latency range. +type bucket struct { + nextTime time.Time // next time we can accept a span + buffer []*SpanData // circular buffer of spans + nextIndex int // location next SpanData should be placed in buffer + overflow bool // whether the circular buffer has wrapped around +} + +func makeBucket(bufferSize int) bucket { + return bucket{ + buffer: make([]*SpanData, bufferSize), + } +} + +// add adds a span to the bucket, if nextTime has been reached. 
+func (b *bucket) add(s *SpanData) { + if s.EndTime.Before(b.nextTime) { + return + } + if len(b.buffer) == 0 { + return + } + b.nextTime = s.EndTime.Add(samplePeriod) + b.buffer[b.nextIndex] = s + b.nextIndex++ + if b.nextIndex == len(b.buffer) { + b.nextIndex = 0 + b.overflow = true + } +} + +// size returns the number of spans in the bucket. +func (b *bucket) size() int { + if b.overflow { + return len(b.buffer) + } + return b.nextIndex +} + +// span returns the ith span in the bucket. +func (b *bucket) span(i int) *SpanData { + if !b.overflow { + return b.buffer[i] + } + if i < len(b.buffer)-b.nextIndex { + return b.buffer[b.nextIndex+i] + } + return b.buffer[b.nextIndex+i-len(b.buffer)] +} + +// resize changes the size of the bucket to n, keeping up to n existing spans. +func (b *bucket) resize(n int) { + cur := b.size() + newBuffer := make([]*SpanData, n) + if cur < n { + for i := 0; i < cur; i++ { + newBuffer[i] = b.span(i) + } + b.buffer = newBuffer + b.nextIndex = cur + b.overflow = false + return + } + for i := 0; i < n; i++ { + newBuffer[i] = b.span(i + cur - n) + } + b.buffer = newBuffer + b.nextIndex = 0 + b.overflow = true +} + +// latencyBucket returns the appropriate bucket number for a given latency. +func latencyBucket(latency time.Duration) int { + i := 0 + for i < len(defaultLatencies) && latency >= defaultLatencies[i] { + i++ + } + return i +} + +// latencyBucketBounds returns the lower and upper bounds for a latency bucket +// number. +// +// The lower bound is inclusive, the upper bound is exclusive (except for the +// last bucket.) +func latencyBucketBounds(index int) (lower time.Duration, upper time.Duration) { + if index == 0 { + return 0, defaultLatencies[index] + } + if index == len(defaultLatencies) { + return defaultLatencies[index-1], 1<<63 - 1 + } + return defaultLatencies[index-1], defaultLatencies[index] +} diff --git a/vendor/go.opencensus.io/trace/spanstore.go b/vendor/go.opencensus.io/trace/spanstore.go new file mode 100644 index 000000000..c442d9902 --- /dev/null +++ b/vendor/go.opencensus.io/trace/spanstore.go @@ -0,0 +1,306 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "sync" + "time" + + "go.opencensus.io/internal" +) + +const ( + maxBucketSize = 100000 + defaultBucketSize = 10 +) + +var ( + ssmu sync.RWMutex // protects spanStores + spanStores = make(map[string]*spanStore) +) + +// This exists purely to avoid exposing internal methods used by z-Pages externally. +type internalOnly struct{} + +func init() { + //TODO(#412): remove + internal.Trace = &internalOnly{} +} + +// ReportActiveSpans returns the active spans for the given name. +func (i internalOnly) ReportActiveSpans(name string) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + for span := range s.active { + out = append(out, span.makeSpanData()) + } + return out +} + +// ReportSpansByError returns a sample of error spans. 
+// +// If code is nonzero, only spans with that status code are returned. +func (i internalOnly) ReportSpansByError(name string, code int32) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + if code != 0 { + if b, ok := s.errors[code]; ok { + for _, sd := range b.buffer { + if sd == nil { + break + } + out = append(out, sd) + } + } + } else { + for _, b := range s.errors { + for _, sd := range b.buffer { + if sd == nil { + break + } + out = append(out, sd) + } + } + } + return out +} + +// ConfigureBucketSizes sets the number of spans to keep per latency and error +// bucket for different span names. +func (i internalOnly) ConfigureBucketSizes(bcs []internal.BucketConfiguration) { + for _, bc := range bcs { + latencyBucketSize := bc.MaxRequestsSucceeded + if latencyBucketSize < 0 { + latencyBucketSize = 0 + } + if latencyBucketSize > maxBucketSize { + latencyBucketSize = maxBucketSize + } + errorBucketSize := bc.MaxRequestsErrors + if errorBucketSize < 0 { + errorBucketSize = 0 + } + if errorBucketSize > maxBucketSize { + errorBucketSize = maxBucketSize + } + spanStoreSetSize(bc.Name, latencyBucketSize, errorBucketSize) + } +} + +// ReportSpansPerMethod returns a summary of what spans are being stored for each span name. +func (i internalOnly) ReportSpansPerMethod() map[string]internal.PerMethodSummary { + out := make(map[string]internal.PerMethodSummary) + ssmu.RLock() + defer ssmu.RUnlock() + for name, s := range spanStores { + s.mu.Lock() + p := internal.PerMethodSummary{ + Active: len(s.active), + } + for code, b := range s.errors { + p.ErrorBuckets = append(p.ErrorBuckets, internal.ErrorBucketSummary{ + ErrorCode: code, + Size: b.size(), + }) + } + for i, b := range s.latency { + min, max := latencyBucketBounds(i) + p.LatencyBuckets = append(p.LatencyBuckets, internal.LatencyBucketSummary{ + MinLatency: min, + MaxLatency: max, + Size: b.size(), + }) + } + s.mu.Unlock() + out[name] = p + } + return out +} + +// ReportSpansByLatency returns a sample of successful spans. +// +// minLatency is the minimum latency of spans to be returned. +// maxLatency, if nonzero, is the maximum latency of spans to be returned. +func (i internalOnly) ReportSpansByLatency(name string, minLatency, maxLatency time.Duration) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + for i, b := range s.latency { + min, max := latencyBucketBounds(i) + if i+1 != len(s.latency) && max <= minLatency { + continue + } + if maxLatency != 0 && maxLatency < min { + continue + } + for _, sd := range b.buffer { + if sd == nil { + break + } + if minLatency != 0 || maxLatency != 0 { + d := sd.EndTime.Sub(sd.StartTime) + if d < minLatency { + continue + } + if maxLatency != 0 && d > maxLatency { + continue + } + } + out = append(out, sd) + } + } + return out +} + +// spanStore keeps track of spans stored for a particular span name. +// +// It contains all active spans; a sample of spans for failed requests, +// categorized by error code; and a sample of spans for successful requests, +// bucketed by latency. +type spanStore struct { + mu sync.Mutex // protects everything below. + active map[*Span]struct{} + errors map[int32]*bucket + latency []bucket + maxSpansPerErrorBucket int +} + +// newSpanStore creates a span store. 
+func newSpanStore(name string, latencyBucketSize int, errorBucketSize int) *spanStore { + s := &spanStore{ + active: make(map[*Span]struct{}), + latency: make([]bucket, len(defaultLatencies)+1), + maxSpansPerErrorBucket: errorBucketSize, + } + for i := range s.latency { + s.latency[i] = makeBucket(latencyBucketSize) + } + return s +} + +// spanStoreForName returns the spanStore for the given name. +// +// It returns nil if it doesn't exist. +func spanStoreForName(name string) *spanStore { + var s *spanStore + ssmu.RLock() + s, _ = spanStores[name] + ssmu.RUnlock() + return s +} + +// spanStoreForNameCreateIfNew returns the spanStore for the given name. +// +// It creates it if it didn't exist. +func spanStoreForNameCreateIfNew(name string) *spanStore { + ssmu.RLock() + s, ok := spanStores[name] + ssmu.RUnlock() + if ok { + return s + } + ssmu.Lock() + defer ssmu.Unlock() + s, ok = spanStores[name] + if ok { + return s + } + s = newSpanStore(name, defaultBucketSize, defaultBucketSize) + spanStores[name] = s + return s +} + +// spanStoreSetSize resizes the spanStore for the given name. +// +// It creates it if it didn't exist. +func spanStoreSetSize(name string, latencyBucketSize int, errorBucketSize int) { + ssmu.RLock() + s, ok := spanStores[name] + ssmu.RUnlock() + if ok { + s.resize(latencyBucketSize, errorBucketSize) + return + } + ssmu.Lock() + defer ssmu.Unlock() + s, ok = spanStores[name] + if ok { + s.resize(latencyBucketSize, errorBucketSize) + return + } + s = newSpanStore(name, latencyBucketSize, errorBucketSize) + spanStores[name] = s +} + +func (s *spanStore) resize(latencyBucketSize int, errorBucketSize int) { + s.mu.Lock() + for i := range s.latency { + s.latency[i].resize(latencyBucketSize) + } + for _, b := range s.errors { + b.resize(errorBucketSize) + } + s.maxSpansPerErrorBucket = errorBucketSize + s.mu.Unlock() +} + +// add adds a span to the active bucket of the spanStore. +func (s *spanStore) add(span *Span) { + s.mu.Lock() + s.active[span] = struct{}{} + s.mu.Unlock() +} + +// finished removes a span from the active set, and adds a corresponding +// SpanData to a latency or error bucket. +func (s *spanStore) finished(span *Span, sd *SpanData) { + latency := sd.EndTime.Sub(sd.StartTime) + if latency < 0 { + latency = 0 + } + code := sd.Status.Code + + s.mu.Lock() + delete(s.active, span) + if code == 0 { + s.latency[latencyBucket(latency)].add(sd) + } else { + if s.errors == nil { + s.errors = make(map[int32]*bucket) + } + if b := s.errors[code]; b != nil { + b.add(sd) + } else { + b := makeBucket(s.maxSpansPerErrorBucket) + s.errors[code] = &b + b.add(sd) + } + } + s.mu.Unlock() +} diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go new file mode 100644 index 000000000..49a1c0196 --- /dev/null +++ b/vendor/go.opencensus.io/trace/trace.go @@ -0,0 +1,463 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace + +import ( + "context" + crand "crypto/rand" + "encoding/binary" + "fmt" + "math/rand" + "sync" + "sync/atomic" + "time" + + "go.opencensus.io/internal" +) + +// Span represents a span of a trace. It has an associated SpanContext, and +// stores data accumulated while the span is active. +// +// Ideally users should interact with Spans by calling the functions in this +// package that take a Context parameter. +type Span struct { + // data contains information recorded about the span. + // + // It will be non-nil if we are exporting the span or recording events for it. + // Otherwise, data is nil, and the Span is simply a carrier for the + // SpanContext, so that the trace ID is propagated. + data *SpanData + mu sync.Mutex // protects the contents of *data (but not the pointer value.) + spanContext SpanContext + // spanStore is the spanStore this span belongs to, if any, otherwise it is nil. + *spanStore + exportOnce sync.Once +} + +// IsRecordingEvents returns true if events are being recorded for this span. +// Use this check to avoid computing expensive annotations when they will never +// be used. +func (s *Span) IsRecordingEvents() bool { + if s == nil { + return false + } + return s.data != nil +} + +// TraceOptions contains options associated with a trace span. +type TraceOptions uint32 + +// IsSampled returns true if the span will be exported. +func (sc SpanContext) IsSampled() bool { + return sc.TraceOptions.IsSampled() +} + +// setIsSampled sets the TraceOptions bit that determines whether the span will be exported. +func (sc *SpanContext) setIsSampled(sampled bool) { + if sampled { + sc.TraceOptions |= 1 + } else { + sc.TraceOptions &= ^TraceOptions(1) + } +} + +// IsSampled returns true if the span will be exported. +func (t TraceOptions) IsSampled() bool { + return t&1 == 1 +} + +// SpanContext contains the state that must propagate across process boundaries. +// +// SpanContext is not an implementation of context.Context. +// TODO: add reference to external Census docs for SpanContext. +type SpanContext struct { + TraceID TraceID + SpanID SpanID + TraceOptions TraceOptions +} + +type contextKey struct{} + +// FromContext returns the Span stored in a context, or nil if there isn't one. +func FromContext(ctx context.Context) *Span { + s, _ := ctx.Value(contextKey{}).(*Span) + return s +} + +// WithSpan returns a new context with the given Span attached. +func WithSpan(parent context.Context, s *Span) context.Context { + return context.WithValue(parent, contextKey{}, s) +} + +// All available span kinds. Span kind must be either one of these values. +const ( + SpanKindUnspecified = iota + SpanKindServer + SpanKindClient +) + +// StartOptions contains options concerning how a span is started. +type StartOptions struct { + // Sampler to consult for this Span. If provided, it is always consulted. + // + // If not provided, then the behavior differs based on whether + // the parent of this Span is remote, local, or there is no parent. + // In the case of a remote parent or no parent, the + // default sampler (see Config) will be consulted. Otherwise, + // when there is a non-remote parent, no new sampling decision will be made: + // we will preserve the sampling of the parent. + Sampler Sampler + + // SpanKind represents the kind of a span. If none is set, + // SpanKindUnspecified is used. + SpanKind int +} + +// StartSpan starts a new child span of the current span in the context. If +// there is no span in the context, creates a new trace and span. 
+// +// This is provided as a convenience for WithSpan(ctx, NewSpan(...)). Use it +// if you require custom spans in addition to the default spans provided by +// ocgrpc, ochttp or similar framework integration. +func StartSpan(ctx context.Context, name string) (context.Context, *Span) { + parentSpan, _ := ctx.Value(contextKey{}).(*Span) + span := NewSpan(name, parentSpan, StartOptions{}) + return WithSpan(ctx, span), span +} + +// NewSpan returns a new span. +// +// If parent is not nil, created span will be a child of the parent. +func NewSpan(name string, parent *Span, o StartOptions) *Span { + hasParent := false + var parentSpanContext SpanContext + if parent != nil { + hasParent = true + parentSpanContext = parent.SpanContext() + } + return startSpanInternal(name, hasParent, parentSpanContext, false, o) +} + +// NewSpanWithRemoteParent returns a new span with the given parent SpanContext. +func NewSpanWithRemoteParent(name string, parent SpanContext, o StartOptions) *Span { + return startSpanInternal(name, true, parent, true, o) +} + +func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *Span { + span := &Span{} + span.spanContext = parent + + cfg := config.Load().(*Config) + + if !hasParent { + span.spanContext.TraceID = cfg.IDGenerator.NewTraceID() + } + span.spanContext.SpanID = cfg.IDGenerator.NewSpanID() + sampler := cfg.DefaultSampler + + if !hasParent || remoteParent || o.Sampler != nil { + // If this span is the child of a local span and no Sampler is set in the + // options, keep the parent's TraceOptions. + // + // Otherwise, consult the Sampler in the options if it is non-nil, otherwise + // the default sampler. + if o.Sampler != nil { + sampler = o.Sampler + } + span.spanContext.setIsSampled(sampler(SamplingParameters{ + ParentContext: parent, + TraceID: span.spanContext.TraceID, + SpanID: span.spanContext.SpanID, + Name: name, + HasRemoteParent: remoteParent}).Sample) + } + + if !internal.LocalSpanStoreEnabled && !span.spanContext.IsSampled() { + return span + } + + span.data = &SpanData{ + SpanContext: span.spanContext, + StartTime: time.Now(), + SpanKind: o.SpanKind, + Name: name, + HasRemoteParent: remoteParent, + } + if hasParent { + span.data.ParentSpanID = parent.SpanID + } + if internal.LocalSpanStoreEnabled { + var ss *spanStore + ss = spanStoreForNameCreateIfNew(name) + if ss != nil { + span.spanStore = ss + ss.add(span) + } + } + + return span +} + +// End ends the span. +func (s *Span) End() { + if !s.IsRecordingEvents() { + return + } + s.exportOnce.Do(func() { + // TODO: optimize to avoid this call if sd won't be used. + sd := s.makeSpanData() + sd.EndTime = internal.MonotonicEndTime(sd.StartTime) + if s.spanStore != nil { + s.spanStore.finished(s, sd) + } + if s.spanContext.IsSampled() { + // TODO: consider holding exportersMu for less time. + exportersMu.Lock() + for e := range exporters { + e.ExportSpan(sd) + } + exportersMu.Unlock() + } + }) +} + +// makeSpanData produces a SpanData representing the current state of the Span. +// It requires that s.data is non-nil. +func (s *Span) makeSpanData() *SpanData { + var sd SpanData + s.mu.Lock() + sd = *s.data + if s.data.Attributes != nil { + sd.Attributes = make(map[string]interface{}) + for k, v := range s.data.Attributes { + sd.Attributes[k] = v + } + } + s.mu.Unlock() + return &sd +} + +// SpanContext returns the SpanContext of the span. 
+func (s *Span) SpanContext() SpanContext { + if s == nil { + return SpanContext{} + } + return s.spanContext +} + +// SetStatus sets the status of the span, if it is recording events. +func (s *Span) SetStatus(status Status) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.Status = status + s.mu.Unlock() +} + +// AddAttributes sets attributes in the span. +// +// Existing attributes whose keys appear in the attributes parameter are overwritten. +func (s *Span) AddAttributes(attributes ...Attribute) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + if s.data.Attributes == nil { + s.data.Attributes = make(map[string]interface{}) + } + copyAttributes(s.data.Attributes, attributes) + s.mu.Unlock() +} + +// copyAttributes copies a slice of Attributes into a map. +func copyAttributes(m map[string]interface{}, attributes []Attribute) { + for _, a := range attributes { + m[a.key] = a.value + } +} + +func (s *Span) lazyPrintfInternal(attributes []Attribute, format string, a ...interface{}) { + now := time.Now() + msg := fmt.Sprintf(format, a...) + var m map[string]interface{} + s.mu.Lock() + if len(attributes) != 0 { + m = make(map[string]interface{}) + copyAttributes(m, attributes) + } + s.data.Annotations = append(s.data.Annotations, Annotation{ + Time: now, + Message: msg, + Attributes: m, + }) + s.mu.Unlock() +} + +func (s *Span) printStringInternal(attributes []Attribute, str string) { + now := time.Now() + var a map[string]interface{} + s.mu.Lock() + if len(attributes) != 0 { + a = make(map[string]interface{}) + copyAttributes(a, attributes) + } + s.data.Annotations = append(s.data.Annotations, Annotation{ + Time: now, + Message: str, + Attributes: a, + }) + s.mu.Unlock() +} + +// Annotate adds an annotation with attributes. +// Attributes can be nil. +func (s *Span) Annotate(attributes []Attribute, str string) { + if !s.IsRecordingEvents() { + return + } + s.printStringInternal(attributes, str) +} + +// Annotatef adds an annotation with attributes. +func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) { + if !s.IsRecordingEvents() { + return + } + s.lazyPrintfInternal(attributes, format, a...) +} + +// AddMessageSendEvent adds a message send event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows to identify a message between the sender and receiver). +// For example, this could be a sequence id. +func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + now := time.Now() + s.mu.Lock() + s.data.MessageEvents = append(s.data.MessageEvents, MessageEvent{ + Time: now, + EventType: MessageEventTypeSent, + MessageID: messageID, + UncompressedByteSize: uncompressedByteSize, + CompressedByteSize: compressedByteSize, + }) + s.mu.Unlock() +} + +// AddMessageReceiveEvent adds a message receive event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows to identify a message between the sender and receiver). +// For example, this could be a sequence id. 
+func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + now := time.Now() + s.mu.Lock() + s.data.MessageEvents = append(s.data.MessageEvents, MessageEvent{ + Time: now, + EventType: MessageEventTypeRecv, + MessageID: messageID, + UncompressedByteSize: uncompressedByteSize, + CompressedByteSize: compressedByteSize, + }) + s.mu.Unlock() +} + +// AddLink adds a link to the span. +func (s *Span) AddLink(l Link) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.Links = append(s.data.Links, l) + s.mu.Unlock() +} + +func (s *Span) String() string { + if s == nil { + return "" + } + if s.data == nil { + return fmt.Sprintf("span %s", s.spanContext.SpanID) + } + s.mu.Lock() + str := fmt.Sprintf("span %s %q", s.spanContext.SpanID, s.data.Name) + s.mu.Unlock() + return str +} + +var config atomic.Value // access atomically + +func init() { + gen := &defaultIDGenerator{} + // initialize traceID and spanID generators. + var rngSeed int64 + for _, p := range []interface{}{ + &rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc, + } { + binary.Read(crand.Reader, binary.LittleEndian, p) + } + gen.traceIDRand = rand.New(rand.NewSource(rngSeed)) + gen.spanIDInc |= 1 + + config.Store(&Config{ + DefaultSampler: ProbabilitySampler(defaultSamplingProbability), + IDGenerator: gen, + }) +} + +type defaultIDGenerator struct { + sync.Mutex + traceIDRand *rand.Rand + traceIDAdd [2]uint64 + nextSpanID uint64 + spanIDInc uint64 +} + +// NewSpanID returns a non-zero span ID from a randomly-chosen sequence. +// mu should be held while this function is called. +func (gen *defaultIDGenerator) NewSpanID() [8]byte { + gen.Lock() + id := gen.nextSpanID + gen.nextSpanID += gen.spanIDInc + if gen.nextSpanID == 0 { + gen.nextSpanID += gen.spanIDInc + } + gen.Unlock() + var sid [8]byte + binary.LittleEndian.PutUint64(sid[:], id) + return sid +} + +// NewTraceID returns a non-zero trace ID from a randomly-chosen sequence. +// mu should be held while this function is called. +func (gen *defaultIDGenerator) NewTraceID() [16]byte { + var tid [16]byte + // Construct the trace ID from two outputs of traceIDRand, with a constant + // added to each half for additional entropy. + gen.Lock() + binary.LittleEndian.PutUint64(tid[0:8], gen.traceIDRand.Uint64()+gen.traceIDAdd[0]) + binary.LittleEndian.PutUint64(tid[8:16], gen.traceIDRand.Uint64()+gen.traceIDAdd[1]) + gen.Unlock() + return tid +} diff --git a/vendor/google.golang.org/api/googleapi/transport/apikey.go b/vendor/google.golang.org/api/googleapi/transport/apikey.go new file mode 100644 index 000000000..eca1ea250 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/transport/apikey.go @@ -0,0 +1,38 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package transport contains HTTP transports used to make +// authenticated API requests. +package transport + +import ( + "errors" + "net/http" +) + +// APIKey is an HTTP Transport which wraps an underlying transport and +// appends an API Key "key" parameter to the URL of outgoing requests. +type APIKey struct { + // Key is the API Key to set on requests. + Key string + + // Transport is the underlying HTTP transport. + // If nil, http.DefaultTransport is used. 
+ Transport http.RoundTripper +} + +func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.Transport + if rt == nil { + rt = http.DefaultTransport + if rt == nil { + return nil, errors.New("googleapi/transport: no Transport specified or available") + } + } + newReq := *req + args := newReq.URL.Query() + args.Set("key", t.Key) + newReq.URL.RawQuery = args.Encode() + return rt.RoundTrip(&newReq) +} diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go new file mode 100644 index 000000000..e5b849bff --- /dev/null +++ b/vendor/google.golang.org/api/internal/creds.go @@ -0,0 +1,45 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "context" + "fmt" + "io/ioutil" + + "golang.org/x/oauth2/google" +) + +// Creds returns credential information obtained from DialSettings, or if none, then +// it returns default credential information. +func Creds(ctx context.Context, ds *DialSettings) (*google.DefaultCredentials, error) { + if ds.Credentials != nil { + return ds.Credentials, nil + } + if ds.CredentialsJSON != nil { + return google.CredentialsFromJSON(ctx, ds.CredentialsJSON, ds.Scopes...) + } + if ds.CredentialsFile != "" { + data, err := ioutil.ReadFile(ds.CredentialsFile) + if err != nil { + return nil, fmt.Errorf("cannot read credentials file: %v", err) + } + return google.CredentialsFromJSON(ctx, data, ds.Scopes...) + } + if ds.TokenSource != nil { + return &google.DefaultCredentials{TokenSource: ds.TokenSource}, nil + } + return google.FindDefaultCredentials(ctx, ds.Scopes...) +} diff --git a/vendor/google.golang.org/api/internal/pool.go b/vendor/google.golang.org/api/internal/pool.go new file mode 100644 index 000000000..ba406247f --- /dev/null +++ b/vendor/google.golang.org/api/internal/pool.go @@ -0,0 +1,61 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "errors" + + "google.golang.org/grpc/naming" +) + +// PoolResolver provides a fixed list of addresses to load balance between +// and does not provide further updates. +type PoolResolver struct { + poolSize int + dialOpt *DialSettings + ch chan []*naming.Update +} + +// NewPoolResolver returns a PoolResolver +// This is an EXPERIMENTAL API and may be changed or removed in the future. 
+func NewPoolResolver(size int, o *DialSettings) *PoolResolver { + return &PoolResolver{poolSize: size, dialOpt: o} +} + +// Resolve returns a Watcher for the endpoint defined by the DialSettings +// provided to NewPoolResolver. +func (r *PoolResolver) Resolve(target string) (naming.Watcher, error) { + if r.dialOpt.Endpoint == "" { + return nil, errors.New("No endpoint configured") + } + addrs := make([]*naming.Update, 0, r.poolSize) + for i := 0; i < r.poolSize; i++ { + addrs = append(addrs, &naming.Update{Op: naming.Add, Addr: r.dialOpt.Endpoint, Metadata: i}) + } + r.ch = make(chan []*naming.Update, 1) + r.ch <- addrs + return r, nil +} + +// Next returns a static list of updates on the first call, +// and blocks indefinitely until Close is called on subsequent calls. +func (r *PoolResolver) Next() ([]*naming.Update, error) { + return <-r.ch, nil +} + +// Close releases resources associated with the pool and causes Next to unblock. +func (r *PoolResolver) Close() { + close(r.ch) +} diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go new file mode 100644 index 000000000..afabdc423 --- /dev/null +++ b/vendor/google.golang.org/api/internal/settings.go @@ -0,0 +1,81 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal supports the options and transport packages. +package internal + +import ( + "errors" + "net/http" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/grpc" +) + +// DialSettings holds information needed to establish a connection with a +// Google API service. +type DialSettings struct { + Endpoint string + Scopes []string + TokenSource oauth2.TokenSource + Credentials *google.DefaultCredentials + CredentialsFile string // if set, Token Source is ignored. + CredentialsJSON []byte + UserAgent string + APIKey string + HTTPClient *http.Client + GRPCDialOpts []grpc.DialOption + GRPCConn *grpc.ClientConn + NoAuth bool +} + +// Validate reports an error if ds is invalid. +func (ds *DialSettings) Validate() error { + hasCreds := ds.APIKey != "" || ds.TokenSource != nil || ds.CredentialsFile != "" || ds.Credentials != nil + if ds.NoAuth && hasCreds { + return errors.New("options.WithoutAuthentication is incompatible with any option that provides credentials") + } + // Credentials should not appear with other options. + // We currently allow TokenSource and CredentialsFile to coexist. + // TODO(jba): make TokenSource & CredentialsFile an error (breaking change). + nCreds := 0 + if ds.Credentials != nil { + nCreds++ + } + if ds.CredentialsJSON != nil { + nCreds++ + } + if ds.CredentialsFile != "" { + nCreds++ + } + if ds.APIKey != "" { + nCreds++ + } + if ds.TokenSource != nil { + nCreds++ + } + // Accept only one form of credentials, except we allow TokenSource and CredentialsFile for backwards compatibility. 
+ if nCreds > 1 && !(nCreds == 2 && ds.TokenSource != nil && ds.CredentialsFile != "") { + return errors.New("multiple credential options provided") + } + if ds.HTTPClient != nil && ds.GRPCConn != nil { + return errors.New("WithHTTPClient is incompatible with WithGRPCConn") + } + if ds.HTTPClient != nil && ds.GRPCDialOpts != nil { + return errors.New("WithHTTPClient is incompatible with gRPC dial options") + } + + return nil +} diff --git a/vendor/google.golang.org/api/iterator/iterator.go b/vendor/google.golang.org/api/iterator/iterator.go new file mode 100644 index 000000000..3c8ea7732 --- /dev/null +++ b/vendor/google.golang.org/api/iterator/iterator.go @@ -0,0 +1,231 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package iterator provides support for standard Google API iterators. +// See https://github.com/GoogleCloudPlatform/gcloud-golang/wiki/Iterator-Guidelines. +package iterator + +import ( + "errors" + "fmt" + "reflect" +) + +// Done is returned by an iterator's Next method when the iteration is +// complete; when there are no more items to return. +var Done = errors.New("no more items in iterator") + +// We don't support mixed calls to Next and NextPage because they play +// with the paging state in incompatible ways. +var errMixed = errors.New("iterator: Next and NextPage called on same iterator") + +// PageInfo contains information about an iterator's paging state. +type PageInfo struct { + // Token is the token used to retrieve the next page of items from the + // API. You may set Token immediately after creating an iterator to + // begin iteration at a particular point. If Token is the empty string, + // the iterator will begin with the first eligible item. + // + // The result of setting Token after the first call to Next is undefined. + // + // After the underlying API method is called to retrieve a page of items, + // Token is set to the next-page token in the response. + Token string + + // MaxSize is the maximum number of items returned by a call to the API. + // Set MaxSize as a hint to optimize the buffering behavior of the iterator. + // If zero, the page size is determined by the underlying service. + // + // Use Pager to retrieve a page of a specific, exact size. + MaxSize int + + // The error state of the iterator. Manipulated by PageInfo.next and Pager. + // This is a latch: it starts as nil, and once set should never change. + err error + + // If true, no more calls to fetch should be made. Set to true when fetch + // returns an empty page token. The iterator is Done when this is true AND + // the buffer is empty. + atEnd bool + + // Function that fetches a page from the underlying service. It should pass + // the pageSize and pageToken arguments to the service, fill the buffer + // with the results from the call, and return the next-page token returned + // by the service. The function must not remove any existing items from the + // buffer. 
If the underlying RPC takes an int32 page size, pageSize should + // be silently truncated. + fetch func(pageSize int, pageToken string) (nextPageToken string, err error) + + // Function that returns the number of currently buffered items. + bufLen func() int + + // Function that returns the buffer, after setting the buffer variable to nil. + takeBuf func() interface{} + + // Set to true on first call to PageInfo.next or Pager.NextPage. Used to check + // for calls to both Next and NextPage with the same iterator. + nextCalled, nextPageCalled bool +} + +// NewPageInfo exposes internals for iterator implementations. +// It is not a stable interface. +var NewPageInfo = newPageInfo + +// If an iterator can support paging, its iterator-creating method should call +// this (via the NewPageInfo variable above). +// +// The fetch, bufLen and takeBuf arguments provide access to the +// iterator's internal slice of buffered items. They behave as described in +// PageInfo, above. +// +// The return value is the PageInfo.next method bound to the returned PageInfo value. +// (Returning it avoids exporting PageInfo.next.) +func newPageInfo(fetch func(int, string) (string, error), bufLen func() int, takeBuf func() interface{}) (*PageInfo, func() error) { + pi := &PageInfo{ + fetch: fetch, + bufLen: bufLen, + takeBuf: takeBuf, + } + return pi, pi.next +} + +// Remaining returns the number of items available before the iterator makes another API call. +func (pi *PageInfo) Remaining() int { return pi.bufLen() } + +// next provides support for an iterator's Next function. An iterator's Next +// should return the error returned by next if non-nil; else it can assume +// there is at least one item in its buffer, and it should return that item and +// remove it from the buffer. +func (pi *PageInfo) next() error { + pi.nextCalled = true + if pi.err != nil { // Once we get an error, always return it. + // TODO(jba): fix so users can retry on transient errors? Probably not worth it. + return pi.err + } + if pi.nextPageCalled { + pi.err = errMixed + return pi.err + } + // Loop until we get some items or reach the end. + for pi.bufLen() == 0 && !pi.atEnd { + if err := pi.fill(pi.MaxSize); err != nil { + pi.err = err + return pi.err + } + if pi.Token == "" { + pi.atEnd = true + } + } + // Either the buffer is non-empty or pi.atEnd is true (or both). + if pi.bufLen() == 0 { + // The buffer is empty and pi.atEnd is true, i.e. the service has no + // more items. + pi.err = Done + } + return pi.err +} + +// Call the service to fill the buffer, using size and pi.Token. Set pi.Token to the +// next-page token returned by the call. +// If fill returns a non-nil error, the buffer will be empty. +func (pi *PageInfo) fill(size int) error { + tok, err := pi.fetch(size, pi.Token) + if err != nil { + pi.takeBuf() // clear the buffer + return err + } + pi.Token = tok + return nil +} + +// Pageable is implemented by iterators that support paging. +type Pageable interface { + // PageInfo returns paging information associated with the iterator. + PageInfo() *PageInfo +} + +// Pager supports retrieving iterator items a page at a time. +type Pager struct { + pageInfo *PageInfo + pageSize int +} + +// NewPager returns a pager that uses iter. Calls to its NextPage method will +// obtain exactly pageSize items, unless fewer remain. The pageToken argument +// indicates where to start the iteration. Pass the empty string to start at +// the beginning, or pass a token retrieved from a call to Pager.NextPage. 
+// +// If you use an iterator with a Pager, you must not call Next on the iterator. +func NewPager(iter Pageable, pageSize int, pageToken string) *Pager { + p := &Pager{ + pageInfo: iter.PageInfo(), + pageSize: pageSize, + } + p.pageInfo.Token = pageToken + if pageSize <= 0 { + p.pageInfo.err = errors.New("iterator: page size must be positive") + } + return p +} + +// NextPage retrieves a sequence of items from the iterator and appends them +// to slicep, which must be a pointer to a slice of the iterator's item type. +// Exactly p.pageSize items will be appended, unless fewer remain. +// +// The first return value is the page token to use for the next page of items. +// If empty, there are no more pages. Aside from checking for the end of the +// iteration, the returned page token is only needed if the iteration is to be +// resumed a later time, in another context (possibly another process). +// +// The second return value is non-nil if an error occurred. It will never be +// the special iterator sentinel value Done. To recognize the end of the +// iteration, compare nextPageToken to the empty string. +// +// It is possible for NextPage to return a single zero-length page along with +// an empty page token when there are no more items in the iteration. +func (p *Pager) NextPage(slicep interface{}) (nextPageToken string, err error) { + p.pageInfo.nextPageCalled = true + if p.pageInfo.err != nil { + return "", p.pageInfo.err + } + if p.pageInfo.nextCalled { + p.pageInfo.err = errMixed + return "", p.pageInfo.err + } + if p.pageInfo.bufLen() > 0 { + return "", errors.New("must call NextPage with an empty buffer") + } + // The buffer must be empty here, so takeBuf is a no-op. We call it just to get + // the buffer's type. + wantSliceType := reflect.PtrTo(reflect.ValueOf(p.pageInfo.takeBuf()).Type()) + if slicep == nil { + return "", errors.New("nil passed to Pager.NextPage") + } + vslicep := reflect.ValueOf(slicep) + if vslicep.Type() != wantSliceType { + return "", fmt.Errorf("slicep should be of type %s, got %T", wantSliceType, slicep) + } + for p.pageInfo.bufLen() < p.pageSize { + if err := p.pageInfo.fill(p.pageSize - p.pageInfo.bufLen()); err != nil { + p.pageInfo.err = err + return "", p.pageInfo.err + } + if p.pageInfo.Token == "" { + break + } + } + e := vslicep.Elem() + e.Set(reflect.AppendSlice(e, reflect.ValueOf(p.pageInfo.takeBuf()))) + return p.pageInfo.Token, nil +} diff --git a/vendor/google.golang.org/api/option/credentials_go19.go b/vendor/google.golang.org/api/option/credentials_go19.go new file mode 100644 index 000000000..0636a8294 --- /dev/null +++ b/vendor/google.golang.org/api/option/credentials_go19.go @@ -0,0 +1,33 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build go1.9 + +package option + +import ( + "golang.org/x/oauth2/google" + "google.golang.org/api/internal" +) + +type withCreds google.Credentials + +func (w *withCreds) Apply(o *internal.DialSettings) { + o.Credentials = (*google.Credentials)(w) +} + +// WithCredentials returns a ClientOption that authenticates API calls. +func WithCredentials(creds *google.Credentials) ClientOption { + return (*withCreds)(creds) +} diff --git a/vendor/google.golang.org/api/option/credentials_notgo19.go b/vendor/google.golang.org/api/option/credentials_notgo19.go new file mode 100644 index 000000000..74d3a4b5b --- /dev/null +++ b/vendor/google.golang.org/api/option/credentials_notgo19.go @@ -0,0 +1,32 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.9 + +package option + +import ( + "golang.org/x/oauth2/google" + "google.golang.org/api/internal" +) + +type withCreds google.DefaultCredentials + +func (w *withCreds) Apply(o *internal.DialSettings) { + o.Credentials = (*google.DefaultCredentials)(w) +} + +func WithCredentials(creds *google.DefaultCredentials) ClientOption { + return (*withCreds)(creds) +} diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go new file mode 100644 index 000000000..e7ecfe3c8 --- /dev/null +++ b/vendor/google.golang.org/api/option/option.go @@ -0,0 +1,191 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package option contains options for Google API clients. +package option + +import ( + "net/http" + + "golang.org/x/oauth2" + "google.golang.org/api/internal" + "google.golang.org/grpc" +) + +// A ClientOption is an option for a Google API client. +type ClientOption interface { + Apply(*internal.DialSettings) +} + +// WithTokenSource returns a ClientOption that specifies an OAuth2 token +// source to be used as the basis for authentication. +func WithTokenSource(s oauth2.TokenSource) ClientOption { + return withTokenSource{s} +} + +type withTokenSource struct{ ts oauth2.TokenSource } + +func (w withTokenSource) Apply(o *internal.DialSettings) { + o.TokenSource = w.ts +} + +type withCredFile string + +func (w withCredFile) Apply(o *internal.DialSettings) { + o.CredentialsFile = string(w) +} + +// WithCredentialsFile returns a ClientOption that authenticates +// API calls with the given service account or refresh token JSON +// credentials file. 
+func WithCredentialsFile(filename string) ClientOption { + return withCredFile(filename) +} + +// WithServiceAccountFile returns a ClientOption that uses a Google service +// account credentials file to authenticate. +// +// Deprecated: Use WithCredentialsFile instead. +func WithServiceAccountFile(filename string) ClientOption { + return WithCredentialsFile(filename) +} + +// WithCredentialsJSON returns a ClientOption that authenticates +// API calls with the given service account or refresh token JSON +// credentials. +func WithCredentialsJSON(p []byte) ClientOption { + return withCredentialsJSON(p) +} + +type withCredentialsJSON []byte + +func (w withCredentialsJSON) Apply(o *internal.DialSettings) { + o.CredentialsJSON = make([]byte, len(w)) + copy(o.CredentialsJSON, w) +} + +// WithEndpoint returns a ClientOption that overrides the default endpoint +// to be used for a service. +func WithEndpoint(url string) ClientOption { + return withEndpoint(url) +} + +type withEndpoint string + +func (w withEndpoint) Apply(o *internal.DialSettings) { + o.Endpoint = string(w) +} + +// WithScopes returns a ClientOption that overrides the default OAuth2 scopes +// to be used for a service. +func WithScopes(scope ...string) ClientOption { + return withScopes(scope) +} + +type withScopes []string + +func (w withScopes) Apply(o *internal.DialSettings) { + o.Scopes = make([]string, len(w)) + copy(o.Scopes, w) +} + +// WithUserAgent returns a ClientOption that sets the User-Agent. +func WithUserAgent(ua string) ClientOption { + return withUA(ua) +} + +type withUA string + +func (w withUA) Apply(o *internal.DialSettings) { o.UserAgent = string(w) } + +// WithHTTPClient returns a ClientOption that specifies the HTTP client to use +// as the basis of communications. This option may only be used with services +// that support HTTP as their communication transport. When used, the +// WithHTTPClient option takes precedent over all other supplied options. +func WithHTTPClient(client *http.Client) ClientOption { + return withHTTPClient{client} +} + +type withHTTPClient struct{ client *http.Client } + +func (w withHTTPClient) Apply(o *internal.DialSettings) { + o.HTTPClient = w.client +} + +// WithGRPCConn returns a ClientOption that specifies the gRPC client +// connection to use as the basis of communications. This option many only be +// used with services that support gRPC as their communication transport. When +// used, the WithGRPCConn option takes precedent over all other supplied +// options. +func WithGRPCConn(conn *grpc.ClientConn) ClientOption { + return withGRPCConn{conn} +} + +type withGRPCConn struct{ conn *grpc.ClientConn } + +func (w withGRPCConn) Apply(o *internal.DialSettings) { + o.GRPCConn = w.conn +} + +// WithGRPCDialOption returns a ClientOption that appends a new grpc.DialOption +// to an underlying gRPC dial. It does not work with WithGRPCConn. +func WithGRPCDialOption(opt grpc.DialOption) ClientOption { + return withGRPCDialOption{opt} +} + +type withGRPCDialOption struct{ opt grpc.DialOption } + +func (w withGRPCDialOption) Apply(o *internal.DialSettings) { + o.GRPCDialOpts = append(o.GRPCDialOpts, w.opt) +} + +// WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC +// connections that requests will be balanced between. +// This is an EXPERIMENTAL API and may be changed or removed in the future. 
+func WithGRPCConnectionPool(size int) ClientOption { + return withGRPCConnectionPool(size) +} + +type withGRPCConnectionPool int + +func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) { + balancer := grpc.RoundRobin(internal.NewPoolResolver(int(w), o)) + o.GRPCDialOpts = append(o.GRPCDialOpts, grpc.WithBalancer(balancer)) +} + +// WithAPIKey returns a ClientOption that specifies an API key to be used +// as the basis for authentication. +// +// API Keys can only be used for JSON-over-HTTP APIs, including those under +// the import path google.golang.org/api/.... +func WithAPIKey(apiKey string) ClientOption { + return withAPIKey(apiKey) +} + +type withAPIKey string + +func (w withAPIKey) Apply(o *internal.DialSettings) { o.APIKey = string(w) } + +// WithoutAuthentication returns a ClientOption that specifies that no +// authentication should be used. It is suitable only for testing and for +// accessing public resources, like public Google Cloud Storage buckets. +// It is an error to provide both WithoutAuthentication and any of WithAPIKey, +// WithTokenSource, WithCredentialsFile or WithServiceAccountFile. +func WithoutAuthentication() ClientOption { + return withoutAuthentication{} +} + +type withoutAuthentication struct{} + +func (w withoutAuthentication) Apply(o *internal.DialSettings) { o.NoAuth = true } diff --git a/vendor/google.golang.org/api/transport/dial.go b/vendor/google.golang.org/api/transport/dial.go new file mode 100644 index 000000000..cf1ffca70 --- /dev/null +++ b/vendor/google.golang.org/api/transport/dial.go @@ -0,0 +1,49 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transport supports network connections to HTTP and GRPC servers. +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package transport + +import ( + "context" + "net/http" + + "google.golang.org/grpc" + + "google.golang.org/api/option" + gtransport "google.golang.org/api/transport/grpc" + htransport "google.golang.org/api/transport/http" +) + +// NewHTTPClient returns an HTTP client for use communicating with a Google cloud +// service, configured with the given ClientOptions. It also returns the endpoint +// for the service as specified in the options. +func NewHTTPClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) { + return htransport.NewClient(ctx, opts...) +} + +// DialGRPC returns a GRPC connection for use communicating with a Google cloud +// service, configured with the given ClientOptions. +func DialGRPC(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + return gtransport.Dial(ctx, opts...) +} + +// DialGRPCInsecure returns an insecure GRPC connection for use communicating +// with fake or mock Google cloud service implementations, such as emulators. +// The connection is configured with the given ClientOptions. 
+func DialGRPCInsecure(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + return gtransport.DialInsecure(ctx, opts...) +} diff --git a/vendor/google.golang.org/api/transport/go19.go b/vendor/google.golang.org/api/transport/go19.go new file mode 100644 index 000000000..3e89f9328 --- /dev/null +++ b/vendor/google.golang.org/api/transport/go19.go @@ -0,0 +1,35 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.9 + +package transport + +import ( + "context" + + "golang.org/x/oauth2/google" + "google.golang.org/api/internal" + "google.golang.org/api/option" +) + +// Creds constructs a google.Credentials from the information in the options, +// or obtains the default credentials in the same way as google.FindDefaultCredentials. +func Creds(ctx context.Context, opts ...option.ClientOption) (*google.Credentials, error) { + var ds internal.DialSettings + for _, opt := range opts { + opt.Apply(&ds) + } + return internal.Creds(ctx, &ds) +} diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go new file mode 100644 index 000000000..62360f6c4 --- /dev/null +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -0,0 +1,98 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package grpc supports network connections to GRPC servers. +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package grpc + +import ( + "context" + "errors" + "log" + + "go.opencensus.io/plugin/ocgrpc" + "google.golang.org/api/internal" + "google.golang.org/api/option" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/oauth" +) + +// Set at init time by dial_appengine.go. If nil, we're not on App Engine. +var appengineDialerHook func(context.Context) grpc.DialOption + +// Dial returns a GRPC connection for use communicating with a Google cloud +// service, configured with the given ClientOptions. +func Dial(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + return dial(ctx, false, opts) +} + +// DialInsecure returns an insecure GRPC connection for use communicating +// with fake or mock Google cloud service implementations, such as emulators. +// The connection is configured with the given ClientOptions. 
+func DialInsecure(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + return dial(ctx, true, opts) +} + +func dial(ctx context.Context, insecure bool, opts []option.ClientOption) (*grpc.ClientConn, error) { + var o internal.DialSettings + for _, opt := range opts { + opt.Apply(&o) + } + if err := o.Validate(); err != nil { + return nil, err + } + if o.HTTPClient != nil { + return nil, errors.New("unsupported HTTP client specified") + } + if o.GRPCConn != nil { + return o.GRPCConn, nil + } + grpcOpts := []grpc.DialOption{ + grpc.WithWaitForHandshake(), + } + if insecure { + grpcOpts = []grpc.DialOption{grpc.WithInsecure()} + } else if !o.NoAuth { + if o.APIKey != "" { + log.Print("API keys are not supported for gRPC APIs. Remove the WithAPIKey option from your client-creating call.") + } + creds, err := internal.Creds(ctx, &o) + if err != nil { + return nil, err + } + grpcOpts = []grpc.DialOption{ + grpc.WithPerRPCCredentials(oauth.TokenSource{creds.TokenSource}), + grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), + } + } + if appengineDialerHook != nil { + // Use the Socket API on App Engine. + grpcOpts = append(grpcOpts, appengineDialerHook(ctx)) + } + // Add tracing, but before the other options, so that clients can override the + // gRPC stats handler. + // This assumes that gRPC options are processed in order, left to right. + grpcOpts = addOCStatsHandler(grpcOpts) + grpcOpts = append(grpcOpts, o.GRPCDialOpts...) + if o.UserAgent != "" { + grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent)) + } + return grpc.DialContext(ctx, o.Endpoint, grpcOpts...) +} + +func addOCStatsHandler(opts []grpc.DialOption) []grpc.DialOption { + return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) +} diff --git a/vendor/google.golang.org/api/transport/grpc/dial_appengine.go b/vendor/google.golang.org/api/transport/grpc/dial_appengine.go new file mode 100644 index 000000000..87819d4e1 --- /dev/null +++ b/vendor/google.golang.org/api/transport/grpc/dial_appengine.go @@ -0,0 +1,41 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build appengine + +package grpc + +import ( + "context" + "net" + "time" + + "google.golang.org/appengine" + "google.golang.org/appengine/socket" + "google.golang.org/grpc" +) + +func init() { + // NOTE: dev_appserver doesn't currently support SSL. + // When it does, this code can be removed. 
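+	// On the dev server we return early and leave appengineDialerHook nil,
+	// so dial() falls back to the default gRPC dialer instead of the App
+	// Engine socket API.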
+ if appengine.IsDevAppServer() { + return + } + + appengineDialerHook = func(ctx context.Context) grpc.DialOption { + return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { + return socket.DialTimeout(ctx, "tcp", addr, timeout) + }) + } +} diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go new file mode 100644 index 000000000..a25da6741 --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -0,0 +1,147 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package http supports network connections to HTTP servers. +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package http + +import ( + "context" + "errors" + "net/http" + + "go.opencensus.io/plugin/ochttp" + "golang.org/x/oauth2" + "google.golang.org/api/googleapi/transport" + "google.golang.org/api/internal" + "google.golang.org/api/option" + "google.golang.org/api/transport/http/internal/propagation" +) + +// NewClient returns an HTTP client for use communicating with a Google cloud +// service, configured with the given ClientOptions. It also returns the endpoint +// for the service as specified in the options. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) { + settings, err := newSettings(opts) + if err != nil { + return nil, "", err + } + // TODO(cbro): consider injecting the User-Agent even if an explicit HTTP client is provided? + if settings.HTTPClient != nil { + return settings.HTTPClient, settings.Endpoint, nil + } + trans, err := newTransport(ctx, defaultBaseTransport(ctx), settings) + if err != nil { + return nil, "", err + } + return &http.Client{Transport: trans}, settings.Endpoint, nil +} + +// NewTransport creates an http.RoundTripper for use communicating with a Google +// cloud service, configured with the given ClientOptions. Its RoundTrip method delegates to base. +func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.ClientOption) (http.RoundTripper, error) { + settings, err := newSettings(opts) + if err != nil { + return nil, err + } + if settings.HTTPClient != nil { + return nil, errors.New("transport/http: WithHTTPClient passed to NewTransport") + } + return newTransport(ctx, base, settings) +} + +func newTransport(ctx context.Context, base http.RoundTripper, settings *internal.DialSettings) (http.RoundTripper, error) { + trans := base + trans = userAgentTransport{ + base: trans, + userAgent: settings.UserAgent, + } + trans = addOCTransport(trans) + switch { + case settings.NoAuth: + // Do nothing. 
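+		// (Descriptive note on the precedence this switch implements:
+		// WithoutAuthentication disables auth entirely, an API key is used
+		// next if one was supplied, and otherwise OAuth2 credentials are
+		// resolved via internal.Creds below.)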
+ case settings.APIKey != "": + trans = &transport.APIKey{ + Transport: trans, + Key: settings.APIKey, + } + default: + creds, err := internal.Creds(ctx, settings) + if err != nil { + return nil, err + } + trans = &oauth2.Transport{ + Base: trans, + Source: creds.TokenSource, + } + } + return trans, nil +} + +func newSettings(opts []option.ClientOption) (*internal.DialSettings, error) { + var o internal.DialSettings + for _, opt := range opts { + opt.Apply(&o) + } + if err := o.Validate(); err != nil { + return nil, err + } + if o.GRPCConn != nil { + return nil, errors.New("unsupported gRPC connection specified") + } + return &o, nil +} + +type userAgentTransport struct { + userAgent string + base http.RoundTripper +} + +func (t userAgentTransport) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.base + if rt == nil { + return nil, errors.New("transport: no Transport specified") + } + if t.userAgent == "" { + return rt.RoundTrip(req) + } + newReq := *req + newReq.Header = make(http.Header) + for k, vv := range req.Header { + newReq.Header[k] = vv + } + // TODO(cbro): append to existing User-Agent header? + newReq.Header["User-Agent"] = []string{t.userAgent} + return rt.RoundTrip(&newReq) +} + +// Set at init time by dial_appengine.go. If nil, we're not on App Engine. +var appengineUrlfetchHook func(context.Context) http.RoundTripper + +// defaultBaseTransport returns the base HTTP transport. +// On App Engine, this is urlfetch.Transport, otherwise it's http.DefaultTransport. +func defaultBaseTransport(ctx context.Context) http.RoundTripper { + if appengineUrlfetchHook != nil { + return appengineUrlfetchHook(ctx) + } + return http.DefaultTransport +} + +func addOCTransport(trans http.RoundTripper) http.RoundTripper { + return &ochttp.Transport{ + Base: trans, + Propagation: &propagation.HTTPFormat{}, + } +} diff --git a/vendor/google.golang.org/api/transport/http/dial_appengine.go b/vendor/google.golang.org/api/transport/http/dial_appengine.go new file mode 100644 index 000000000..04c81413c --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/dial_appengine.go @@ -0,0 +1,30 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build appengine + +package http + +import ( + "context" + "net/http" + + "google.golang.org/appengine/urlfetch" +) + +func init() { + appengineUrlfetchHook = func(ctx context.Context) http.RoundTripper { + return &urlfetch.Transport{Context: ctx} + } +} diff --git a/vendor/google.golang.org/api/transport/http/internal/propagation/http.go b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go new file mode 100644 index 000000000..24b4f0d29 --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go @@ -0,0 +1,96 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +// Package propagation implements X-Cloud-Trace-Context header propagation used +// by Google Cloud products. +package propagation + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "net/http" + "strconv" + "strings" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +const ( + httpHeaderMaxSize = 200 + httpHeader = `X-Cloud-Trace-Context` +) + +var _ propagation.HTTPFormat = (*HTTPFormat)(nil) + +// HTTPFormat implements propagation.HTTPFormat to propagate +// traces in HTTP headers for Google Cloud Platform and Stackdriver Trace. +type HTTPFormat struct{} + +// SpanContextFromRequest extracts a Stackdriver Trace span context from incoming requests. +func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { + h := req.Header.Get(httpHeader) + // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat. + // Return if the header is empty or missing, or if the header is unreasonably + // large, to avoid making unnecessary copies of a large string. + if h == "" || len(h) > httpHeaderMaxSize { + return trace.SpanContext{}, false + } + + // Parse the trace id field. + slash := strings.Index(h, `/`) + if slash == -1 { + return trace.SpanContext{}, false + } + tid, h := h[:slash], h[slash+1:] + + buf, err := hex.DecodeString(tid) + if err != nil { + return trace.SpanContext{}, false + } + copy(sc.TraceID[:], buf) + + // Parse the span id field. + spanstr := h + semicolon := strings.Index(h, `;`) + if semicolon != -1 { + spanstr, h = h[:semicolon], h[semicolon+1:] + } + sid, err := strconv.ParseUint(spanstr, 10, 64) + if err != nil { + return trace.SpanContext{}, false + } + binary.BigEndian.PutUint64(sc.SpanID[:], sid) + + // Parse the options field, options field is optional. + if !strings.HasPrefix(h, "o=") { + return sc, true + } + o, err := strconv.ParseUint(h[2:], 10, 64) + if err != nil { + return trace.SpanContext{}, false + } + sc.TraceOptions = trace.TraceOptions(o) + return sc, true +} + +// SpanContextToRequest modifies the given request to include a Stackdriver Trace header. +func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { + sid := binary.BigEndian.Uint64(sc.SpanID[:]) + header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions)) + req.Header.Set(httpHeader, header) +} diff --git a/vendor/google.golang.org/api/transport/not_go19.go b/vendor/google.golang.org/api/transport/not_go19.go new file mode 100644 index 000000000..0cb627594 --- /dev/null +++ b/vendor/google.golang.org/api/transport/not_go19.go @@ -0,0 +1,35 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.9 + +package transport + +import ( + "context" + + "golang.org/x/oauth2/google" + "google.golang.org/api/internal" + "google.golang.org/api/option" +) + +// Creds constructs a google.DefaultCredentials from the information in the options, +// or obtains the default credentials in the same way as google.FindDefaultCredentials. +func Creds(ctx context.Context, opts ...option.ClientOption) (*google.DefaultCredentials, error) { + var ds internal.DialSettings + for _, opt := range opts { + opt.Apply(&ds) + } + return internal.Creds(ctx, &ds) +} diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go new file mode 100644 index 000000000..60628ec9b --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go @@ -0,0 +1,1858 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/socket/socket_service.proto +// DO NOT EDIT! + +/* +Package socket is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/socket/socket_service.proto + +It has these top-level messages: + RemoteSocketServiceError + AddressPort + CreateSocketRequest + CreateSocketReply + BindRequest + BindReply + GetSocketNameRequest + GetSocketNameReply + GetPeerNameRequest + GetPeerNameReply + SocketOption + SetSocketOptionsRequest + SetSocketOptionsReply + GetSocketOptionsRequest + GetSocketOptionsReply + ConnectRequest + ConnectReply + ListenRequest + ListenReply + AcceptRequest + AcceptReply + ShutDownRequest + ShutDownReply + CloseRequest + CloseReply + SendRequest + SendReply + ReceiveRequest + ReceiveReply + PollEvent + PollRequest + PollReply + ResolveRequest + ResolveReply +*/ +package socket + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type RemoteSocketServiceError_ErrorCode int32 + +const ( + RemoteSocketServiceError_SYSTEM_ERROR RemoteSocketServiceError_ErrorCode = 1 + RemoteSocketServiceError_GAI_ERROR RemoteSocketServiceError_ErrorCode = 2 + RemoteSocketServiceError_FAILURE RemoteSocketServiceError_ErrorCode = 4 + RemoteSocketServiceError_PERMISSION_DENIED RemoteSocketServiceError_ErrorCode = 5 + RemoteSocketServiceError_INVALID_REQUEST RemoteSocketServiceError_ErrorCode = 6 + RemoteSocketServiceError_SOCKET_CLOSED RemoteSocketServiceError_ErrorCode = 7 +) + +var RemoteSocketServiceError_ErrorCode_name = map[int32]string{ + 1: "SYSTEM_ERROR", + 2: "GAI_ERROR", + 4: "FAILURE", + 5: "PERMISSION_DENIED", + 6: "INVALID_REQUEST", + 7: "SOCKET_CLOSED", +} +var RemoteSocketServiceError_ErrorCode_value = map[string]int32{ + "SYSTEM_ERROR": 1, + "GAI_ERROR": 2, + "FAILURE": 4, + "PERMISSION_DENIED": 5, + "INVALID_REQUEST": 6, + "SOCKET_CLOSED": 7, +} + +func (x RemoteSocketServiceError_ErrorCode) Enum() *RemoteSocketServiceError_ErrorCode { + p := new(RemoteSocketServiceError_ErrorCode) + *p = x + return p +} +func (x RemoteSocketServiceError_ErrorCode) String() string { + return proto.EnumName(RemoteSocketServiceError_ErrorCode_name, int32(x)) +} +func (x *RemoteSocketServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_ErrorCode_value, data, "RemoteSocketServiceError_ErrorCode") + if err != nil { + return err + } + *x = RemoteSocketServiceError_ErrorCode(value) + return nil +} + +type RemoteSocketServiceError_SystemError int32 + +const ( + RemoteSocketServiceError_SYS_SUCCESS RemoteSocketServiceError_SystemError = 0 + RemoteSocketServiceError_SYS_EPERM RemoteSocketServiceError_SystemError = 1 + RemoteSocketServiceError_SYS_ENOENT RemoteSocketServiceError_SystemError = 2 + RemoteSocketServiceError_SYS_ESRCH RemoteSocketServiceError_SystemError = 3 + RemoteSocketServiceError_SYS_EINTR RemoteSocketServiceError_SystemError = 4 + RemoteSocketServiceError_SYS_EIO RemoteSocketServiceError_SystemError = 5 + RemoteSocketServiceError_SYS_ENXIO RemoteSocketServiceError_SystemError = 6 + RemoteSocketServiceError_SYS_E2BIG RemoteSocketServiceError_SystemError = 7 + RemoteSocketServiceError_SYS_ENOEXEC RemoteSocketServiceError_SystemError = 8 + RemoteSocketServiceError_SYS_EBADF RemoteSocketServiceError_SystemError = 9 + RemoteSocketServiceError_SYS_ECHILD RemoteSocketServiceError_SystemError = 10 + RemoteSocketServiceError_SYS_EAGAIN RemoteSocketServiceError_SystemError = 11 + RemoteSocketServiceError_SYS_EWOULDBLOCK RemoteSocketServiceError_SystemError = 11 + RemoteSocketServiceError_SYS_ENOMEM RemoteSocketServiceError_SystemError = 12 + RemoteSocketServiceError_SYS_EACCES RemoteSocketServiceError_SystemError = 13 + RemoteSocketServiceError_SYS_EFAULT RemoteSocketServiceError_SystemError = 14 + RemoteSocketServiceError_SYS_ENOTBLK RemoteSocketServiceError_SystemError = 15 + RemoteSocketServiceError_SYS_EBUSY RemoteSocketServiceError_SystemError = 16 + RemoteSocketServiceError_SYS_EEXIST RemoteSocketServiceError_SystemError = 17 + RemoteSocketServiceError_SYS_EXDEV RemoteSocketServiceError_SystemError = 18 + RemoteSocketServiceError_SYS_ENODEV RemoteSocketServiceError_SystemError = 19 + RemoteSocketServiceError_SYS_ENOTDIR RemoteSocketServiceError_SystemError = 20 + RemoteSocketServiceError_SYS_EISDIR RemoteSocketServiceError_SystemError = 21 + RemoteSocketServiceError_SYS_EINVAL 
RemoteSocketServiceError_SystemError = 22 + RemoteSocketServiceError_SYS_ENFILE RemoteSocketServiceError_SystemError = 23 + RemoteSocketServiceError_SYS_EMFILE RemoteSocketServiceError_SystemError = 24 + RemoteSocketServiceError_SYS_ENOTTY RemoteSocketServiceError_SystemError = 25 + RemoteSocketServiceError_SYS_ETXTBSY RemoteSocketServiceError_SystemError = 26 + RemoteSocketServiceError_SYS_EFBIG RemoteSocketServiceError_SystemError = 27 + RemoteSocketServiceError_SYS_ENOSPC RemoteSocketServiceError_SystemError = 28 + RemoteSocketServiceError_SYS_ESPIPE RemoteSocketServiceError_SystemError = 29 + RemoteSocketServiceError_SYS_EROFS RemoteSocketServiceError_SystemError = 30 + RemoteSocketServiceError_SYS_EMLINK RemoteSocketServiceError_SystemError = 31 + RemoteSocketServiceError_SYS_EPIPE RemoteSocketServiceError_SystemError = 32 + RemoteSocketServiceError_SYS_EDOM RemoteSocketServiceError_SystemError = 33 + RemoteSocketServiceError_SYS_ERANGE RemoteSocketServiceError_SystemError = 34 + RemoteSocketServiceError_SYS_EDEADLK RemoteSocketServiceError_SystemError = 35 + RemoteSocketServiceError_SYS_EDEADLOCK RemoteSocketServiceError_SystemError = 35 + RemoteSocketServiceError_SYS_ENAMETOOLONG RemoteSocketServiceError_SystemError = 36 + RemoteSocketServiceError_SYS_ENOLCK RemoteSocketServiceError_SystemError = 37 + RemoteSocketServiceError_SYS_ENOSYS RemoteSocketServiceError_SystemError = 38 + RemoteSocketServiceError_SYS_ENOTEMPTY RemoteSocketServiceError_SystemError = 39 + RemoteSocketServiceError_SYS_ELOOP RemoteSocketServiceError_SystemError = 40 + RemoteSocketServiceError_SYS_ENOMSG RemoteSocketServiceError_SystemError = 42 + RemoteSocketServiceError_SYS_EIDRM RemoteSocketServiceError_SystemError = 43 + RemoteSocketServiceError_SYS_ECHRNG RemoteSocketServiceError_SystemError = 44 + RemoteSocketServiceError_SYS_EL2NSYNC RemoteSocketServiceError_SystemError = 45 + RemoteSocketServiceError_SYS_EL3HLT RemoteSocketServiceError_SystemError = 46 + RemoteSocketServiceError_SYS_EL3RST RemoteSocketServiceError_SystemError = 47 + RemoteSocketServiceError_SYS_ELNRNG RemoteSocketServiceError_SystemError = 48 + RemoteSocketServiceError_SYS_EUNATCH RemoteSocketServiceError_SystemError = 49 + RemoteSocketServiceError_SYS_ENOCSI RemoteSocketServiceError_SystemError = 50 + RemoteSocketServiceError_SYS_EL2HLT RemoteSocketServiceError_SystemError = 51 + RemoteSocketServiceError_SYS_EBADE RemoteSocketServiceError_SystemError = 52 + RemoteSocketServiceError_SYS_EBADR RemoteSocketServiceError_SystemError = 53 + RemoteSocketServiceError_SYS_EXFULL RemoteSocketServiceError_SystemError = 54 + RemoteSocketServiceError_SYS_ENOANO RemoteSocketServiceError_SystemError = 55 + RemoteSocketServiceError_SYS_EBADRQC RemoteSocketServiceError_SystemError = 56 + RemoteSocketServiceError_SYS_EBADSLT RemoteSocketServiceError_SystemError = 57 + RemoteSocketServiceError_SYS_EBFONT RemoteSocketServiceError_SystemError = 59 + RemoteSocketServiceError_SYS_ENOSTR RemoteSocketServiceError_SystemError = 60 + RemoteSocketServiceError_SYS_ENODATA RemoteSocketServiceError_SystemError = 61 + RemoteSocketServiceError_SYS_ETIME RemoteSocketServiceError_SystemError = 62 + RemoteSocketServiceError_SYS_ENOSR RemoteSocketServiceError_SystemError = 63 + RemoteSocketServiceError_SYS_ENONET RemoteSocketServiceError_SystemError = 64 + RemoteSocketServiceError_SYS_ENOPKG RemoteSocketServiceError_SystemError = 65 + RemoteSocketServiceError_SYS_EREMOTE RemoteSocketServiceError_SystemError = 66 + RemoteSocketServiceError_SYS_ENOLINK 
RemoteSocketServiceError_SystemError = 67 + RemoteSocketServiceError_SYS_EADV RemoteSocketServiceError_SystemError = 68 + RemoteSocketServiceError_SYS_ESRMNT RemoteSocketServiceError_SystemError = 69 + RemoteSocketServiceError_SYS_ECOMM RemoteSocketServiceError_SystemError = 70 + RemoteSocketServiceError_SYS_EPROTO RemoteSocketServiceError_SystemError = 71 + RemoteSocketServiceError_SYS_EMULTIHOP RemoteSocketServiceError_SystemError = 72 + RemoteSocketServiceError_SYS_EDOTDOT RemoteSocketServiceError_SystemError = 73 + RemoteSocketServiceError_SYS_EBADMSG RemoteSocketServiceError_SystemError = 74 + RemoteSocketServiceError_SYS_EOVERFLOW RemoteSocketServiceError_SystemError = 75 + RemoteSocketServiceError_SYS_ENOTUNIQ RemoteSocketServiceError_SystemError = 76 + RemoteSocketServiceError_SYS_EBADFD RemoteSocketServiceError_SystemError = 77 + RemoteSocketServiceError_SYS_EREMCHG RemoteSocketServiceError_SystemError = 78 + RemoteSocketServiceError_SYS_ELIBACC RemoteSocketServiceError_SystemError = 79 + RemoteSocketServiceError_SYS_ELIBBAD RemoteSocketServiceError_SystemError = 80 + RemoteSocketServiceError_SYS_ELIBSCN RemoteSocketServiceError_SystemError = 81 + RemoteSocketServiceError_SYS_ELIBMAX RemoteSocketServiceError_SystemError = 82 + RemoteSocketServiceError_SYS_ELIBEXEC RemoteSocketServiceError_SystemError = 83 + RemoteSocketServiceError_SYS_EILSEQ RemoteSocketServiceError_SystemError = 84 + RemoteSocketServiceError_SYS_ERESTART RemoteSocketServiceError_SystemError = 85 + RemoteSocketServiceError_SYS_ESTRPIPE RemoteSocketServiceError_SystemError = 86 + RemoteSocketServiceError_SYS_EUSERS RemoteSocketServiceError_SystemError = 87 + RemoteSocketServiceError_SYS_ENOTSOCK RemoteSocketServiceError_SystemError = 88 + RemoteSocketServiceError_SYS_EDESTADDRREQ RemoteSocketServiceError_SystemError = 89 + RemoteSocketServiceError_SYS_EMSGSIZE RemoteSocketServiceError_SystemError = 90 + RemoteSocketServiceError_SYS_EPROTOTYPE RemoteSocketServiceError_SystemError = 91 + RemoteSocketServiceError_SYS_ENOPROTOOPT RemoteSocketServiceError_SystemError = 92 + RemoteSocketServiceError_SYS_EPROTONOSUPPORT RemoteSocketServiceError_SystemError = 93 + RemoteSocketServiceError_SYS_ESOCKTNOSUPPORT RemoteSocketServiceError_SystemError = 94 + RemoteSocketServiceError_SYS_EOPNOTSUPP RemoteSocketServiceError_SystemError = 95 + RemoteSocketServiceError_SYS_ENOTSUP RemoteSocketServiceError_SystemError = 95 + RemoteSocketServiceError_SYS_EPFNOSUPPORT RemoteSocketServiceError_SystemError = 96 + RemoteSocketServiceError_SYS_EAFNOSUPPORT RemoteSocketServiceError_SystemError = 97 + RemoteSocketServiceError_SYS_EADDRINUSE RemoteSocketServiceError_SystemError = 98 + RemoteSocketServiceError_SYS_EADDRNOTAVAIL RemoteSocketServiceError_SystemError = 99 + RemoteSocketServiceError_SYS_ENETDOWN RemoteSocketServiceError_SystemError = 100 + RemoteSocketServiceError_SYS_ENETUNREACH RemoteSocketServiceError_SystemError = 101 + RemoteSocketServiceError_SYS_ENETRESET RemoteSocketServiceError_SystemError = 102 + RemoteSocketServiceError_SYS_ECONNABORTED RemoteSocketServiceError_SystemError = 103 + RemoteSocketServiceError_SYS_ECONNRESET RemoteSocketServiceError_SystemError = 104 + RemoteSocketServiceError_SYS_ENOBUFS RemoteSocketServiceError_SystemError = 105 + RemoteSocketServiceError_SYS_EISCONN RemoteSocketServiceError_SystemError = 106 + RemoteSocketServiceError_SYS_ENOTCONN RemoteSocketServiceError_SystemError = 107 + RemoteSocketServiceError_SYS_ESHUTDOWN RemoteSocketServiceError_SystemError = 108 + 
RemoteSocketServiceError_SYS_ETOOMANYREFS RemoteSocketServiceError_SystemError = 109 + RemoteSocketServiceError_SYS_ETIMEDOUT RemoteSocketServiceError_SystemError = 110 + RemoteSocketServiceError_SYS_ECONNREFUSED RemoteSocketServiceError_SystemError = 111 + RemoteSocketServiceError_SYS_EHOSTDOWN RemoteSocketServiceError_SystemError = 112 + RemoteSocketServiceError_SYS_EHOSTUNREACH RemoteSocketServiceError_SystemError = 113 + RemoteSocketServiceError_SYS_EALREADY RemoteSocketServiceError_SystemError = 114 + RemoteSocketServiceError_SYS_EINPROGRESS RemoteSocketServiceError_SystemError = 115 + RemoteSocketServiceError_SYS_ESTALE RemoteSocketServiceError_SystemError = 116 + RemoteSocketServiceError_SYS_EUCLEAN RemoteSocketServiceError_SystemError = 117 + RemoteSocketServiceError_SYS_ENOTNAM RemoteSocketServiceError_SystemError = 118 + RemoteSocketServiceError_SYS_ENAVAIL RemoteSocketServiceError_SystemError = 119 + RemoteSocketServiceError_SYS_EISNAM RemoteSocketServiceError_SystemError = 120 + RemoteSocketServiceError_SYS_EREMOTEIO RemoteSocketServiceError_SystemError = 121 + RemoteSocketServiceError_SYS_EDQUOT RemoteSocketServiceError_SystemError = 122 + RemoteSocketServiceError_SYS_ENOMEDIUM RemoteSocketServiceError_SystemError = 123 + RemoteSocketServiceError_SYS_EMEDIUMTYPE RemoteSocketServiceError_SystemError = 124 + RemoteSocketServiceError_SYS_ECANCELED RemoteSocketServiceError_SystemError = 125 + RemoteSocketServiceError_SYS_ENOKEY RemoteSocketServiceError_SystemError = 126 + RemoteSocketServiceError_SYS_EKEYEXPIRED RemoteSocketServiceError_SystemError = 127 + RemoteSocketServiceError_SYS_EKEYREVOKED RemoteSocketServiceError_SystemError = 128 + RemoteSocketServiceError_SYS_EKEYREJECTED RemoteSocketServiceError_SystemError = 129 + RemoteSocketServiceError_SYS_EOWNERDEAD RemoteSocketServiceError_SystemError = 130 + RemoteSocketServiceError_SYS_ENOTRECOVERABLE RemoteSocketServiceError_SystemError = 131 + RemoteSocketServiceError_SYS_ERFKILL RemoteSocketServiceError_SystemError = 132 +) + +var RemoteSocketServiceError_SystemError_name = map[int32]string{ + 0: "SYS_SUCCESS", + 1: "SYS_EPERM", + 2: "SYS_ENOENT", + 3: "SYS_ESRCH", + 4: "SYS_EINTR", + 5: "SYS_EIO", + 6: "SYS_ENXIO", + 7: "SYS_E2BIG", + 8: "SYS_ENOEXEC", + 9: "SYS_EBADF", + 10: "SYS_ECHILD", + 11: "SYS_EAGAIN", + // Duplicate value: 11: "SYS_EWOULDBLOCK", + 12: "SYS_ENOMEM", + 13: "SYS_EACCES", + 14: "SYS_EFAULT", + 15: "SYS_ENOTBLK", + 16: "SYS_EBUSY", + 17: "SYS_EEXIST", + 18: "SYS_EXDEV", + 19: "SYS_ENODEV", + 20: "SYS_ENOTDIR", + 21: "SYS_EISDIR", + 22: "SYS_EINVAL", + 23: "SYS_ENFILE", + 24: "SYS_EMFILE", + 25: "SYS_ENOTTY", + 26: "SYS_ETXTBSY", + 27: "SYS_EFBIG", + 28: "SYS_ENOSPC", + 29: "SYS_ESPIPE", + 30: "SYS_EROFS", + 31: "SYS_EMLINK", + 32: "SYS_EPIPE", + 33: "SYS_EDOM", + 34: "SYS_ERANGE", + 35: "SYS_EDEADLK", + // Duplicate value: 35: "SYS_EDEADLOCK", + 36: "SYS_ENAMETOOLONG", + 37: "SYS_ENOLCK", + 38: "SYS_ENOSYS", + 39: "SYS_ENOTEMPTY", + 40: "SYS_ELOOP", + 42: "SYS_ENOMSG", + 43: "SYS_EIDRM", + 44: "SYS_ECHRNG", + 45: "SYS_EL2NSYNC", + 46: "SYS_EL3HLT", + 47: "SYS_EL3RST", + 48: "SYS_ELNRNG", + 49: "SYS_EUNATCH", + 50: "SYS_ENOCSI", + 51: "SYS_EL2HLT", + 52: "SYS_EBADE", + 53: "SYS_EBADR", + 54: "SYS_EXFULL", + 55: "SYS_ENOANO", + 56: "SYS_EBADRQC", + 57: "SYS_EBADSLT", + 59: "SYS_EBFONT", + 60: "SYS_ENOSTR", + 61: "SYS_ENODATA", + 62: "SYS_ETIME", + 63: "SYS_ENOSR", + 64: "SYS_ENONET", + 65: "SYS_ENOPKG", + 66: "SYS_EREMOTE", + 67: "SYS_ENOLINK", + 68: "SYS_EADV", + 69: "SYS_ESRMNT", + 70: "SYS_ECOMM", + 71: 
"SYS_EPROTO", + 72: "SYS_EMULTIHOP", + 73: "SYS_EDOTDOT", + 74: "SYS_EBADMSG", + 75: "SYS_EOVERFLOW", + 76: "SYS_ENOTUNIQ", + 77: "SYS_EBADFD", + 78: "SYS_EREMCHG", + 79: "SYS_ELIBACC", + 80: "SYS_ELIBBAD", + 81: "SYS_ELIBSCN", + 82: "SYS_ELIBMAX", + 83: "SYS_ELIBEXEC", + 84: "SYS_EILSEQ", + 85: "SYS_ERESTART", + 86: "SYS_ESTRPIPE", + 87: "SYS_EUSERS", + 88: "SYS_ENOTSOCK", + 89: "SYS_EDESTADDRREQ", + 90: "SYS_EMSGSIZE", + 91: "SYS_EPROTOTYPE", + 92: "SYS_ENOPROTOOPT", + 93: "SYS_EPROTONOSUPPORT", + 94: "SYS_ESOCKTNOSUPPORT", + 95: "SYS_EOPNOTSUPP", + // Duplicate value: 95: "SYS_ENOTSUP", + 96: "SYS_EPFNOSUPPORT", + 97: "SYS_EAFNOSUPPORT", + 98: "SYS_EADDRINUSE", + 99: "SYS_EADDRNOTAVAIL", + 100: "SYS_ENETDOWN", + 101: "SYS_ENETUNREACH", + 102: "SYS_ENETRESET", + 103: "SYS_ECONNABORTED", + 104: "SYS_ECONNRESET", + 105: "SYS_ENOBUFS", + 106: "SYS_EISCONN", + 107: "SYS_ENOTCONN", + 108: "SYS_ESHUTDOWN", + 109: "SYS_ETOOMANYREFS", + 110: "SYS_ETIMEDOUT", + 111: "SYS_ECONNREFUSED", + 112: "SYS_EHOSTDOWN", + 113: "SYS_EHOSTUNREACH", + 114: "SYS_EALREADY", + 115: "SYS_EINPROGRESS", + 116: "SYS_ESTALE", + 117: "SYS_EUCLEAN", + 118: "SYS_ENOTNAM", + 119: "SYS_ENAVAIL", + 120: "SYS_EISNAM", + 121: "SYS_EREMOTEIO", + 122: "SYS_EDQUOT", + 123: "SYS_ENOMEDIUM", + 124: "SYS_EMEDIUMTYPE", + 125: "SYS_ECANCELED", + 126: "SYS_ENOKEY", + 127: "SYS_EKEYEXPIRED", + 128: "SYS_EKEYREVOKED", + 129: "SYS_EKEYREJECTED", + 130: "SYS_EOWNERDEAD", + 131: "SYS_ENOTRECOVERABLE", + 132: "SYS_ERFKILL", +} +var RemoteSocketServiceError_SystemError_value = map[string]int32{ + "SYS_SUCCESS": 0, + "SYS_EPERM": 1, + "SYS_ENOENT": 2, + "SYS_ESRCH": 3, + "SYS_EINTR": 4, + "SYS_EIO": 5, + "SYS_ENXIO": 6, + "SYS_E2BIG": 7, + "SYS_ENOEXEC": 8, + "SYS_EBADF": 9, + "SYS_ECHILD": 10, + "SYS_EAGAIN": 11, + "SYS_EWOULDBLOCK": 11, + "SYS_ENOMEM": 12, + "SYS_EACCES": 13, + "SYS_EFAULT": 14, + "SYS_ENOTBLK": 15, + "SYS_EBUSY": 16, + "SYS_EEXIST": 17, + "SYS_EXDEV": 18, + "SYS_ENODEV": 19, + "SYS_ENOTDIR": 20, + "SYS_EISDIR": 21, + "SYS_EINVAL": 22, + "SYS_ENFILE": 23, + "SYS_EMFILE": 24, + "SYS_ENOTTY": 25, + "SYS_ETXTBSY": 26, + "SYS_EFBIG": 27, + "SYS_ENOSPC": 28, + "SYS_ESPIPE": 29, + "SYS_EROFS": 30, + "SYS_EMLINK": 31, + "SYS_EPIPE": 32, + "SYS_EDOM": 33, + "SYS_ERANGE": 34, + "SYS_EDEADLK": 35, + "SYS_EDEADLOCK": 35, + "SYS_ENAMETOOLONG": 36, + "SYS_ENOLCK": 37, + "SYS_ENOSYS": 38, + "SYS_ENOTEMPTY": 39, + "SYS_ELOOP": 40, + "SYS_ENOMSG": 42, + "SYS_EIDRM": 43, + "SYS_ECHRNG": 44, + "SYS_EL2NSYNC": 45, + "SYS_EL3HLT": 46, + "SYS_EL3RST": 47, + "SYS_ELNRNG": 48, + "SYS_EUNATCH": 49, + "SYS_ENOCSI": 50, + "SYS_EL2HLT": 51, + "SYS_EBADE": 52, + "SYS_EBADR": 53, + "SYS_EXFULL": 54, + "SYS_ENOANO": 55, + "SYS_EBADRQC": 56, + "SYS_EBADSLT": 57, + "SYS_EBFONT": 59, + "SYS_ENOSTR": 60, + "SYS_ENODATA": 61, + "SYS_ETIME": 62, + "SYS_ENOSR": 63, + "SYS_ENONET": 64, + "SYS_ENOPKG": 65, + "SYS_EREMOTE": 66, + "SYS_ENOLINK": 67, + "SYS_EADV": 68, + "SYS_ESRMNT": 69, + "SYS_ECOMM": 70, + "SYS_EPROTO": 71, + "SYS_EMULTIHOP": 72, + "SYS_EDOTDOT": 73, + "SYS_EBADMSG": 74, + "SYS_EOVERFLOW": 75, + "SYS_ENOTUNIQ": 76, + "SYS_EBADFD": 77, + "SYS_EREMCHG": 78, + "SYS_ELIBACC": 79, + "SYS_ELIBBAD": 80, + "SYS_ELIBSCN": 81, + "SYS_ELIBMAX": 82, + "SYS_ELIBEXEC": 83, + "SYS_EILSEQ": 84, + "SYS_ERESTART": 85, + "SYS_ESTRPIPE": 86, + "SYS_EUSERS": 87, + "SYS_ENOTSOCK": 88, + "SYS_EDESTADDRREQ": 89, + "SYS_EMSGSIZE": 90, + "SYS_EPROTOTYPE": 91, + "SYS_ENOPROTOOPT": 92, + "SYS_EPROTONOSUPPORT": 93, + "SYS_ESOCKTNOSUPPORT": 94, + "SYS_EOPNOTSUPP": 95, + 
"SYS_ENOTSUP": 95, + "SYS_EPFNOSUPPORT": 96, + "SYS_EAFNOSUPPORT": 97, + "SYS_EADDRINUSE": 98, + "SYS_EADDRNOTAVAIL": 99, + "SYS_ENETDOWN": 100, + "SYS_ENETUNREACH": 101, + "SYS_ENETRESET": 102, + "SYS_ECONNABORTED": 103, + "SYS_ECONNRESET": 104, + "SYS_ENOBUFS": 105, + "SYS_EISCONN": 106, + "SYS_ENOTCONN": 107, + "SYS_ESHUTDOWN": 108, + "SYS_ETOOMANYREFS": 109, + "SYS_ETIMEDOUT": 110, + "SYS_ECONNREFUSED": 111, + "SYS_EHOSTDOWN": 112, + "SYS_EHOSTUNREACH": 113, + "SYS_EALREADY": 114, + "SYS_EINPROGRESS": 115, + "SYS_ESTALE": 116, + "SYS_EUCLEAN": 117, + "SYS_ENOTNAM": 118, + "SYS_ENAVAIL": 119, + "SYS_EISNAM": 120, + "SYS_EREMOTEIO": 121, + "SYS_EDQUOT": 122, + "SYS_ENOMEDIUM": 123, + "SYS_EMEDIUMTYPE": 124, + "SYS_ECANCELED": 125, + "SYS_ENOKEY": 126, + "SYS_EKEYEXPIRED": 127, + "SYS_EKEYREVOKED": 128, + "SYS_EKEYREJECTED": 129, + "SYS_EOWNERDEAD": 130, + "SYS_ENOTRECOVERABLE": 131, + "SYS_ERFKILL": 132, +} + +func (x RemoteSocketServiceError_SystemError) Enum() *RemoteSocketServiceError_SystemError { + p := new(RemoteSocketServiceError_SystemError) + *p = x + return p +} +func (x RemoteSocketServiceError_SystemError) String() string { + return proto.EnumName(RemoteSocketServiceError_SystemError_name, int32(x)) +} +func (x *RemoteSocketServiceError_SystemError) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_SystemError_value, data, "RemoteSocketServiceError_SystemError") + if err != nil { + return err + } + *x = RemoteSocketServiceError_SystemError(value) + return nil +} + +type CreateSocketRequest_SocketFamily int32 + +const ( + CreateSocketRequest_IPv4 CreateSocketRequest_SocketFamily = 1 + CreateSocketRequest_IPv6 CreateSocketRequest_SocketFamily = 2 +) + +var CreateSocketRequest_SocketFamily_name = map[int32]string{ + 1: "IPv4", + 2: "IPv6", +} +var CreateSocketRequest_SocketFamily_value = map[string]int32{ + "IPv4": 1, + "IPv6": 2, +} + +func (x CreateSocketRequest_SocketFamily) Enum() *CreateSocketRequest_SocketFamily { + p := new(CreateSocketRequest_SocketFamily) + *p = x + return p +} +func (x CreateSocketRequest_SocketFamily) String() string { + return proto.EnumName(CreateSocketRequest_SocketFamily_name, int32(x)) +} +func (x *CreateSocketRequest_SocketFamily) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketFamily_value, data, "CreateSocketRequest_SocketFamily") + if err != nil { + return err + } + *x = CreateSocketRequest_SocketFamily(value) + return nil +} + +type CreateSocketRequest_SocketProtocol int32 + +const ( + CreateSocketRequest_TCP CreateSocketRequest_SocketProtocol = 1 + CreateSocketRequest_UDP CreateSocketRequest_SocketProtocol = 2 +) + +var CreateSocketRequest_SocketProtocol_name = map[int32]string{ + 1: "TCP", + 2: "UDP", +} +var CreateSocketRequest_SocketProtocol_value = map[string]int32{ + "TCP": 1, + "UDP": 2, +} + +func (x CreateSocketRequest_SocketProtocol) Enum() *CreateSocketRequest_SocketProtocol { + p := new(CreateSocketRequest_SocketProtocol) + *p = x + return p +} +func (x CreateSocketRequest_SocketProtocol) String() string { + return proto.EnumName(CreateSocketRequest_SocketProtocol_name, int32(x)) +} +func (x *CreateSocketRequest_SocketProtocol) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketProtocol_value, data, "CreateSocketRequest_SocketProtocol") + if err != nil { + return err + } + *x = CreateSocketRequest_SocketProtocol(value) + return nil +} + +type SocketOption_SocketOptionLevel int32 
+ +const ( + SocketOption_SOCKET_SOL_IP SocketOption_SocketOptionLevel = 0 + SocketOption_SOCKET_SOL_SOCKET SocketOption_SocketOptionLevel = 1 + SocketOption_SOCKET_SOL_TCP SocketOption_SocketOptionLevel = 6 + SocketOption_SOCKET_SOL_UDP SocketOption_SocketOptionLevel = 17 +) + +var SocketOption_SocketOptionLevel_name = map[int32]string{ + 0: "SOCKET_SOL_IP", + 1: "SOCKET_SOL_SOCKET", + 6: "SOCKET_SOL_TCP", + 17: "SOCKET_SOL_UDP", +} +var SocketOption_SocketOptionLevel_value = map[string]int32{ + "SOCKET_SOL_IP": 0, + "SOCKET_SOL_SOCKET": 1, + "SOCKET_SOL_TCP": 6, + "SOCKET_SOL_UDP": 17, +} + +func (x SocketOption_SocketOptionLevel) Enum() *SocketOption_SocketOptionLevel { + p := new(SocketOption_SocketOptionLevel) + *p = x + return p +} +func (x SocketOption_SocketOptionLevel) String() string { + return proto.EnumName(SocketOption_SocketOptionLevel_name, int32(x)) +} +func (x *SocketOption_SocketOptionLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionLevel_value, data, "SocketOption_SocketOptionLevel") + if err != nil { + return err + } + *x = SocketOption_SocketOptionLevel(value) + return nil +} + +type SocketOption_SocketOptionName int32 + +const ( + SocketOption_SOCKET_SO_DEBUG SocketOption_SocketOptionName = 1 + SocketOption_SOCKET_SO_REUSEADDR SocketOption_SocketOptionName = 2 + SocketOption_SOCKET_SO_TYPE SocketOption_SocketOptionName = 3 + SocketOption_SOCKET_SO_ERROR SocketOption_SocketOptionName = 4 + SocketOption_SOCKET_SO_DONTROUTE SocketOption_SocketOptionName = 5 + SocketOption_SOCKET_SO_BROADCAST SocketOption_SocketOptionName = 6 + SocketOption_SOCKET_SO_SNDBUF SocketOption_SocketOptionName = 7 + SocketOption_SOCKET_SO_RCVBUF SocketOption_SocketOptionName = 8 + SocketOption_SOCKET_SO_KEEPALIVE SocketOption_SocketOptionName = 9 + SocketOption_SOCKET_SO_OOBINLINE SocketOption_SocketOptionName = 10 + SocketOption_SOCKET_SO_LINGER SocketOption_SocketOptionName = 13 + SocketOption_SOCKET_SO_RCVTIMEO SocketOption_SocketOptionName = 20 + SocketOption_SOCKET_SO_SNDTIMEO SocketOption_SocketOptionName = 21 + SocketOption_SOCKET_IP_TOS SocketOption_SocketOptionName = 1 + SocketOption_SOCKET_IP_TTL SocketOption_SocketOptionName = 2 + SocketOption_SOCKET_IP_HDRINCL SocketOption_SocketOptionName = 3 + SocketOption_SOCKET_IP_OPTIONS SocketOption_SocketOptionName = 4 + SocketOption_SOCKET_TCP_NODELAY SocketOption_SocketOptionName = 1 + SocketOption_SOCKET_TCP_MAXSEG SocketOption_SocketOptionName = 2 + SocketOption_SOCKET_TCP_CORK SocketOption_SocketOptionName = 3 + SocketOption_SOCKET_TCP_KEEPIDLE SocketOption_SocketOptionName = 4 + SocketOption_SOCKET_TCP_KEEPINTVL SocketOption_SocketOptionName = 5 + SocketOption_SOCKET_TCP_KEEPCNT SocketOption_SocketOptionName = 6 + SocketOption_SOCKET_TCP_SYNCNT SocketOption_SocketOptionName = 7 + SocketOption_SOCKET_TCP_LINGER2 SocketOption_SocketOptionName = 8 + SocketOption_SOCKET_TCP_DEFER_ACCEPT SocketOption_SocketOptionName = 9 + SocketOption_SOCKET_TCP_WINDOW_CLAMP SocketOption_SocketOptionName = 10 + SocketOption_SOCKET_TCP_INFO SocketOption_SocketOptionName = 11 + SocketOption_SOCKET_TCP_QUICKACK SocketOption_SocketOptionName = 12 +) + +var SocketOption_SocketOptionName_name = map[int32]string{ + 1: "SOCKET_SO_DEBUG", + 2: "SOCKET_SO_REUSEADDR", + 3: "SOCKET_SO_TYPE", + 4: "SOCKET_SO_ERROR", + 5: "SOCKET_SO_DONTROUTE", + 6: "SOCKET_SO_BROADCAST", + 7: "SOCKET_SO_SNDBUF", + 8: "SOCKET_SO_RCVBUF", + 9: "SOCKET_SO_KEEPALIVE", + 10: "SOCKET_SO_OOBINLINE", + 13: "SOCKET_SO_LINGER", + 20: 
"SOCKET_SO_RCVTIMEO", + 21: "SOCKET_SO_SNDTIMEO", + // Duplicate value: 1: "SOCKET_IP_TOS", + // Duplicate value: 2: "SOCKET_IP_TTL", + // Duplicate value: 3: "SOCKET_IP_HDRINCL", + // Duplicate value: 4: "SOCKET_IP_OPTIONS", + // Duplicate value: 1: "SOCKET_TCP_NODELAY", + // Duplicate value: 2: "SOCKET_TCP_MAXSEG", + // Duplicate value: 3: "SOCKET_TCP_CORK", + // Duplicate value: 4: "SOCKET_TCP_KEEPIDLE", + // Duplicate value: 5: "SOCKET_TCP_KEEPINTVL", + // Duplicate value: 6: "SOCKET_TCP_KEEPCNT", + // Duplicate value: 7: "SOCKET_TCP_SYNCNT", + // Duplicate value: 8: "SOCKET_TCP_LINGER2", + // Duplicate value: 9: "SOCKET_TCP_DEFER_ACCEPT", + // Duplicate value: 10: "SOCKET_TCP_WINDOW_CLAMP", + 11: "SOCKET_TCP_INFO", + 12: "SOCKET_TCP_QUICKACK", +} +var SocketOption_SocketOptionName_value = map[string]int32{ + "SOCKET_SO_DEBUG": 1, + "SOCKET_SO_REUSEADDR": 2, + "SOCKET_SO_TYPE": 3, + "SOCKET_SO_ERROR": 4, + "SOCKET_SO_DONTROUTE": 5, + "SOCKET_SO_BROADCAST": 6, + "SOCKET_SO_SNDBUF": 7, + "SOCKET_SO_RCVBUF": 8, + "SOCKET_SO_KEEPALIVE": 9, + "SOCKET_SO_OOBINLINE": 10, + "SOCKET_SO_LINGER": 13, + "SOCKET_SO_RCVTIMEO": 20, + "SOCKET_SO_SNDTIMEO": 21, + "SOCKET_IP_TOS": 1, + "SOCKET_IP_TTL": 2, + "SOCKET_IP_HDRINCL": 3, + "SOCKET_IP_OPTIONS": 4, + "SOCKET_TCP_NODELAY": 1, + "SOCKET_TCP_MAXSEG": 2, + "SOCKET_TCP_CORK": 3, + "SOCKET_TCP_KEEPIDLE": 4, + "SOCKET_TCP_KEEPINTVL": 5, + "SOCKET_TCP_KEEPCNT": 6, + "SOCKET_TCP_SYNCNT": 7, + "SOCKET_TCP_LINGER2": 8, + "SOCKET_TCP_DEFER_ACCEPT": 9, + "SOCKET_TCP_WINDOW_CLAMP": 10, + "SOCKET_TCP_INFO": 11, + "SOCKET_TCP_QUICKACK": 12, +} + +func (x SocketOption_SocketOptionName) Enum() *SocketOption_SocketOptionName { + p := new(SocketOption_SocketOptionName) + *p = x + return p +} +func (x SocketOption_SocketOptionName) String() string { + return proto.EnumName(SocketOption_SocketOptionName_name, int32(x)) +} +func (x *SocketOption_SocketOptionName) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionName_value, data, "SocketOption_SocketOptionName") + if err != nil { + return err + } + *x = SocketOption_SocketOptionName(value) + return nil +} + +type ShutDownRequest_How int32 + +const ( + ShutDownRequest_SOCKET_SHUT_RD ShutDownRequest_How = 1 + ShutDownRequest_SOCKET_SHUT_WR ShutDownRequest_How = 2 + ShutDownRequest_SOCKET_SHUT_RDWR ShutDownRequest_How = 3 +) + +var ShutDownRequest_How_name = map[int32]string{ + 1: "SOCKET_SHUT_RD", + 2: "SOCKET_SHUT_WR", + 3: "SOCKET_SHUT_RDWR", +} +var ShutDownRequest_How_value = map[string]int32{ + "SOCKET_SHUT_RD": 1, + "SOCKET_SHUT_WR": 2, + "SOCKET_SHUT_RDWR": 3, +} + +func (x ShutDownRequest_How) Enum() *ShutDownRequest_How { + p := new(ShutDownRequest_How) + *p = x + return p +} +func (x ShutDownRequest_How) String() string { + return proto.EnumName(ShutDownRequest_How_name, int32(x)) +} +func (x *ShutDownRequest_How) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ShutDownRequest_How_value, data, "ShutDownRequest_How") + if err != nil { + return err + } + *x = ShutDownRequest_How(value) + return nil +} + +type ReceiveRequest_Flags int32 + +const ( + ReceiveRequest_MSG_OOB ReceiveRequest_Flags = 1 + ReceiveRequest_MSG_PEEK ReceiveRequest_Flags = 2 +) + +var ReceiveRequest_Flags_name = map[int32]string{ + 1: "MSG_OOB", + 2: "MSG_PEEK", +} +var ReceiveRequest_Flags_value = map[string]int32{ + "MSG_OOB": 1, + "MSG_PEEK": 2, +} + +func (x ReceiveRequest_Flags) Enum() *ReceiveRequest_Flags { + p := new(ReceiveRequest_Flags) + *p = x + return p 
+} +func (x ReceiveRequest_Flags) String() string { + return proto.EnumName(ReceiveRequest_Flags_name, int32(x)) +} +func (x *ReceiveRequest_Flags) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ReceiveRequest_Flags_value, data, "ReceiveRequest_Flags") + if err != nil { + return err + } + *x = ReceiveRequest_Flags(value) + return nil +} + +type PollEvent_PollEventFlag int32 + +const ( + PollEvent_SOCKET_POLLNONE PollEvent_PollEventFlag = 0 + PollEvent_SOCKET_POLLIN PollEvent_PollEventFlag = 1 + PollEvent_SOCKET_POLLPRI PollEvent_PollEventFlag = 2 + PollEvent_SOCKET_POLLOUT PollEvent_PollEventFlag = 4 + PollEvent_SOCKET_POLLERR PollEvent_PollEventFlag = 8 + PollEvent_SOCKET_POLLHUP PollEvent_PollEventFlag = 16 + PollEvent_SOCKET_POLLNVAL PollEvent_PollEventFlag = 32 + PollEvent_SOCKET_POLLRDNORM PollEvent_PollEventFlag = 64 + PollEvent_SOCKET_POLLRDBAND PollEvent_PollEventFlag = 128 + PollEvent_SOCKET_POLLWRNORM PollEvent_PollEventFlag = 256 + PollEvent_SOCKET_POLLWRBAND PollEvent_PollEventFlag = 512 + PollEvent_SOCKET_POLLMSG PollEvent_PollEventFlag = 1024 + PollEvent_SOCKET_POLLREMOVE PollEvent_PollEventFlag = 4096 + PollEvent_SOCKET_POLLRDHUP PollEvent_PollEventFlag = 8192 +) + +var PollEvent_PollEventFlag_name = map[int32]string{ + 0: "SOCKET_POLLNONE", + 1: "SOCKET_POLLIN", + 2: "SOCKET_POLLPRI", + 4: "SOCKET_POLLOUT", + 8: "SOCKET_POLLERR", + 16: "SOCKET_POLLHUP", + 32: "SOCKET_POLLNVAL", + 64: "SOCKET_POLLRDNORM", + 128: "SOCKET_POLLRDBAND", + 256: "SOCKET_POLLWRNORM", + 512: "SOCKET_POLLWRBAND", + 1024: "SOCKET_POLLMSG", + 4096: "SOCKET_POLLREMOVE", + 8192: "SOCKET_POLLRDHUP", +} +var PollEvent_PollEventFlag_value = map[string]int32{ + "SOCKET_POLLNONE": 0, + "SOCKET_POLLIN": 1, + "SOCKET_POLLPRI": 2, + "SOCKET_POLLOUT": 4, + "SOCKET_POLLERR": 8, + "SOCKET_POLLHUP": 16, + "SOCKET_POLLNVAL": 32, + "SOCKET_POLLRDNORM": 64, + "SOCKET_POLLRDBAND": 128, + "SOCKET_POLLWRNORM": 256, + "SOCKET_POLLWRBAND": 512, + "SOCKET_POLLMSG": 1024, + "SOCKET_POLLREMOVE": 4096, + "SOCKET_POLLRDHUP": 8192, +} + +func (x PollEvent_PollEventFlag) Enum() *PollEvent_PollEventFlag { + p := new(PollEvent_PollEventFlag) + *p = x + return p +} +func (x PollEvent_PollEventFlag) String() string { + return proto.EnumName(PollEvent_PollEventFlag_name, int32(x)) +} +func (x *PollEvent_PollEventFlag) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PollEvent_PollEventFlag_value, data, "PollEvent_PollEventFlag") + if err != nil { + return err + } + *x = PollEvent_PollEventFlag(value) + return nil +} + +type ResolveReply_ErrorCode int32 + +const ( + ResolveReply_SOCKET_EAI_ADDRFAMILY ResolveReply_ErrorCode = 1 + ResolveReply_SOCKET_EAI_AGAIN ResolveReply_ErrorCode = 2 + ResolveReply_SOCKET_EAI_BADFLAGS ResolveReply_ErrorCode = 3 + ResolveReply_SOCKET_EAI_FAIL ResolveReply_ErrorCode = 4 + ResolveReply_SOCKET_EAI_FAMILY ResolveReply_ErrorCode = 5 + ResolveReply_SOCKET_EAI_MEMORY ResolveReply_ErrorCode = 6 + ResolveReply_SOCKET_EAI_NODATA ResolveReply_ErrorCode = 7 + ResolveReply_SOCKET_EAI_NONAME ResolveReply_ErrorCode = 8 + ResolveReply_SOCKET_EAI_SERVICE ResolveReply_ErrorCode = 9 + ResolveReply_SOCKET_EAI_SOCKTYPE ResolveReply_ErrorCode = 10 + ResolveReply_SOCKET_EAI_SYSTEM ResolveReply_ErrorCode = 11 + ResolveReply_SOCKET_EAI_BADHINTS ResolveReply_ErrorCode = 12 + ResolveReply_SOCKET_EAI_PROTOCOL ResolveReply_ErrorCode = 13 + ResolveReply_SOCKET_EAI_OVERFLOW ResolveReply_ErrorCode = 14 + ResolveReply_SOCKET_EAI_MAX ResolveReply_ErrorCode = 15 +) + +var 
ResolveReply_ErrorCode_name = map[int32]string{ + 1: "SOCKET_EAI_ADDRFAMILY", + 2: "SOCKET_EAI_AGAIN", + 3: "SOCKET_EAI_BADFLAGS", + 4: "SOCKET_EAI_FAIL", + 5: "SOCKET_EAI_FAMILY", + 6: "SOCKET_EAI_MEMORY", + 7: "SOCKET_EAI_NODATA", + 8: "SOCKET_EAI_NONAME", + 9: "SOCKET_EAI_SERVICE", + 10: "SOCKET_EAI_SOCKTYPE", + 11: "SOCKET_EAI_SYSTEM", + 12: "SOCKET_EAI_BADHINTS", + 13: "SOCKET_EAI_PROTOCOL", + 14: "SOCKET_EAI_OVERFLOW", + 15: "SOCKET_EAI_MAX", +} +var ResolveReply_ErrorCode_value = map[string]int32{ + "SOCKET_EAI_ADDRFAMILY": 1, + "SOCKET_EAI_AGAIN": 2, + "SOCKET_EAI_BADFLAGS": 3, + "SOCKET_EAI_FAIL": 4, + "SOCKET_EAI_FAMILY": 5, + "SOCKET_EAI_MEMORY": 6, + "SOCKET_EAI_NODATA": 7, + "SOCKET_EAI_NONAME": 8, + "SOCKET_EAI_SERVICE": 9, + "SOCKET_EAI_SOCKTYPE": 10, + "SOCKET_EAI_SYSTEM": 11, + "SOCKET_EAI_BADHINTS": 12, + "SOCKET_EAI_PROTOCOL": 13, + "SOCKET_EAI_OVERFLOW": 14, + "SOCKET_EAI_MAX": 15, +} + +func (x ResolveReply_ErrorCode) Enum() *ResolveReply_ErrorCode { + p := new(ResolveReply_ErrorCode) + *p = x + return p +} +func (x ResolveReply_ErrorCode) String() string { + return proto.EnumName(ResolveReply_ErrorCode_name, int32(x)) +} +func (x *ResolveReply_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ResolveReply_ErrorCode_value, data, "ResolveReply_ErrorCode") + if err != nil { + return err + } + *x = ResolveReply_ErrorCode(value) + return nil +} + +type RemoteSocketServiceError struct { + SystemError *int32 `protobuf:"varint,1,opt,name=system_error,def=0" json:"system_error,omitempty"` + ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail" json:"error_detail,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RemoteSocketServiceError) Reset() { *m = RemoteSocketServiceError{} } +func (m *RemoteSocketServiceError) String() string { return proto.CompactTextString(m) } +func (*RemoteSocketServiceError) ProtoMessage() {} + +const Default_RemoteSocketServiceError_SystemError int32 = 0 + +func (m *RemoteSocketServiceError) GetSystemError() int32 { + if m != nil && m.SystemError != nil { + return *m.SystemError + } + return Default_RemoteSocketServiceError_SystemError +} + +func (m *RemoteSocketServiceError) GetErrorDetail() string { + if m != nil && m.ErrorDetail != nil { + return *m.ErrorDetail + } + return "" +} + +type AddressPort struct { + Port *int32 `protobuf:"varint,1,req,name=port" json:"port,omitempty"` + PackedAddress []byte `protobuf:"bytes,2,opt,name=packed_address" json:"packed_address,omitempty"` + HostnameHint *string `protobuf:"bytes,3,opt,name=hostname_hint" json:"hostname_hint,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AddressPort) Reset() { *m = AddressPort{} } +func (m *AddressPort) String() string { return proto.CompactTextString(m) } +func (*AddressPort) ProtoMessage() {} + +func (m *AddressPort) GetPort() int32 { + if m != nil && m.Port != nil { + return *m.Port + } + return 0 +} + +func (m *AddressPort) GetPackedAddress() []byte { + if m != nil { + return m.PackedAddress + } + return nil +} + +func (m *AddressPort) GetHostnameHint() string { + if m != nil && m.HostnameHint != nil { + return *m.HostnameHint + } + return "" +} + +type CreateSocketRequest struct { + Family *CreateSocketRequest_SocketFamily `protobuf:"varint,1,req,name=family,enum=appengine.CreateSocketRequest_SocketFamily" json:"family,omitempty"` + Protocol *CreateSocketRequest_SocketProtocol `protobuf:"varint,2,req,name=protocol,enum=appengine.CreateSocketRequest_SocketProtocol" json:"protocol,omitempty"` + 
SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options" json:"socket_options,omitempty"` + ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"` + ListenBacklog *int32 `protobuf:"varint,5,opt,name=listen_backlog,def=0" json:"listen_backlog,omitempty"` + RemoteIp *AddressPort `protobuf:"bytes,6,opt,name=remote_ip" json:"remote_ip,omitempty"` + AppId *string `protobuf:"bytes,9,opt,name=app_id" json:"app_id,omitempty"` + ProjectId *int64 `protobuf:"varint,10,opt,name=project_id" json:"project_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateSocketRequest) Reset() { *m = CreateSocketRequest{} } +func (m *CreateSocketRequest) String() string { return proto.CompactTextString(m) } +func (*CreateSocketRequest) ProtoMessage() {} + +const Default_CreateSocketRequest_ListenBacklog int32 = 0 + +func (m *CreateSocketRequest) GetFamily() CreateSocketRequest_SocketFamily { + if m != nil && m.Family != nil { + return *m.Family + } + return CreateSocketRequest_IPv4 +} + +func (m *CreateSocketRequest) GetProtocol() CreateSocketRequest_SocketProtocol { + if m != nil && m.Protocol != nil { + return *m.Protocol + } + return CreateSocketRequest_TCP +} + +func (m *CreateSocketRequest) GetSocketOptions() []*SocketOption { + if m != nil { + return m.SocketOptions + } + return nil +} + +func (m *CreateSocketRequest) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +func (m *CreateSocketRequest) GetListenBacklog() int32 { + if m != nil && m.ListenBacklog != nil { + return *m.ListenBacklog + } + return Default_CreateSocketRequest_ListenBacklog +} + +func (m *CreateSocketRequest) GetRemoteIp() *AddressPort { + if m != nil { + return m.RemoteIp + } + return nil +} + +func (m *CreateSocketRequest) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *CreateSocketRequest) GetProjectId() int64 { + if m != nil && m.ProjectId != nil { + return *m.ProjectId + } + return 0 +} + +type CreateSocketReply struct { + SocketDescriptor *string `protobuf:"bytes,1,opt,name=socket_descriptor" json:"socket_descriptor,omitempty"` + ServerAddress *AddressPort `protobuf:"bytes,3,opt,name=server_address" json:"server_address,omitempty"` + ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateSocketReply) Reset() { *m = CreateSocketReply{} } +func (m *CreateSocketReply) String() string { return proto.CompactTextString(m) } +func (*CreateSocketReply) ProtoMessage() {} + +var extRange_CreateSocketReply = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*CreateSocketReply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_CreateSocketReply +} +func (m *CreateSocketReply) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +func (m *CreateSocketReply) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *CreateSocketReply) GetServerAddress() *AddressPort { + if m != nil { + return m.ServerAddress + } + return nil +} + +func (m *CreateSocketReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type BindRequest struct 
{ + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + ProxyExternalIp *AddressPort `protobuf:"bytes,2,req,name=proxy_external_ip" json:"proxy_external_ip,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BindRequest) Reset() { *m = BindRequest{} } +func (m *BindRequest) String() string { return proto.CompactTextString(m) } +func (*BindRequest) ProtoMessage() {} + +func (m *BindRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *BindRequest) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type BindReply struct { + ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BindReply) Reset() { *m = BindReply{} } +func (m *BindReply) String() string { return proto.CompactTextString(m) } +func (*BindReply) ProtoMessage() {} + +func (m *BindReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type GetSocketNameRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetSocketNameRequest) Reset() { *m = GetSocketNameRequest{} } +func (m *GetSocketNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetSocketNameRequest) ProtoMessage() {} + +func (m *GetSocketNameRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +type GetSocketNameReply struct { + ProxyExternalIp *AddressPort `protobuf:"bytes,2,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetSocketNameReply) Reset() { *m = GetSocketNameReply{} } +func (m *GetSocketNameReply) String() string { return proto.CompactTextString(m) } +func (*GetSocketNameReply) ProtoMessage() {} + +func (m *GetSocketNameReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type GetPeerNameRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetPeerNameRequest) Reset() { *m = GetPeerNameRequest{} } +func (m *GetPeerNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetPeerNameRequest) ProtoMessage() {} + +func (m *GetPeerNameRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +type GetPeerNameReply struct { + PeerIp *AddressPort `protobuf:"bytes,2,opt,name=peer_ip" json:"peer_ip,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetPeerNameReply) Reset() { *m = GetPeerNameReply{} } +func (m *GetPeerNameReply) String() string { return proto.CompactTextString(m) } +func (*GetPeerNameReply) ProtoMessage() {} + +func (m *GetPeerNameReply) GetPeerIp() *AddressPort { + if m != nil { + return m.PeerIp + } + return nil +} + +type SocketOption struct { + Level *SocketOption_SocketOptionLevel `protobuf:"varint,1,req,name=level,enum=appengine.SocketOption_SocketOptionLevel" json:"level,omitempty"` + Option *SocketOption_SocketOptionName `protobuf:"varint,2,req,name=option,enum=appengine.SocketOption_SocketOptionName" 
json:"option,omitempty"` + Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SocketOption) Reset() { *m = SocketOption{} } +func (m *SocketOption) String() string { return proto.CompactTextString(m) } +func (*SocketOption) ProtoMessage() {} + +func (m *SocketOption) GetLevel() SocketOption_SocketOptionLevel { + if m != nil && m.Level != nil { + return *m.Level + } + return SocketOption_SOCKET_SOL_IP +} + +func (m *SocketOption) GetOption() SocketOption_SocketOptionName { + if m != nil && m.Option != nil { + return *m.Option + } + return SocketOption_SOCKET_SO_DEBUG +} + +func (m *SocketOption) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type SetSocketOptionsRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SetSocketOptionsRequest) Reset() { *m = SetSocketOptionsRequest{} } +func (m *SetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } +func (*SetSocketOptionsRequest) ProtoMessage() {} + +func (m *SetSocketOptionsRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *SetSocketOptionsRequest) GetOptions() []*SocketOption { + if m != nil { + return m.Options + } + return nil +} + +type SetSocketOptionsReply struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *SetSocketOptionsReply) Reset() { *m = SetSocketOptionsReply{} } +func (m *SetSocketOptionsReply) String() string { return proto.CompactTextString(m) } +func (*SetSocketOptionsReply) ProtoMessage() {} + +type GetSocketOptionsRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetSocketOptionsRequest) Reset() { *m = GetSocketOptionsRequest{} } +func (m *GetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } +func (*GetSocketOptionsRequest) ProtoMessage() {} + +func (m *GetSocketOptionsRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *GetSocketOptionsRequest) GetOptions() []*SocketOption { + if m != nil { + return m.Options + } + return nil +} + +type GetSocketOptionsReply struct { + Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetSocketOptionsReply) Reset() { *m = GetSocketOptionsReply{} } +func (m *GetSocketOptionsReply) String() string { return proto.CompactTextString(m) } +func (*GetSocketOptionsReply) ProtoMessage() {} + +func (m *GetSocketOptionsReply) GetOptions() []*SocketOption { + if m != nil { + return m.Options + } + return nil +} + +type ConnectRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + RemoteIp *AddressPort `protobuf:"bytes,2,req,name=remote_ip" json:"remote_ip,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,3,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConnectRequest) Reset() { *m = ConnectRequest{} 
} +func (m *ConnectRequest) String() string { return proto.CompactTextString(m) } +func (*ConnectRequest) ProtoMessage() {} + +const Default_ConnectRequest_TimeoutSeconds float64 = -1 + +func (m *ConnectRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ConnectRequest) GetRemoteIp() *AddressPort { + if m != nil { + return m.RemoteIp + } + return nil +} + +func (m *ConnectRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_ConnectRequest_TimeoutSeconds +} + +type ConnectReply struct { + ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConnectReply) Reset() { *m = ConnectReply{} } +func (m *ConnectReply) String() string { return proto.CompactTextString(m) } +func (*ConnectReply) ProtoMessage() {} + +var extRange_ConnectReply = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*ConnectReply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ConnectReply +} +func (m *ConnectReply) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +func (m *ConnectReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type ListenRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + Backlog *int32 `protobuf:"varint,2,req,name=backlog" json:"backlog,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListenRequest) Reset() { *m = ListenRequest{} } +func (m *ListenRequest) String() string { return proto.CompactTextString(m) } +func (*ListenRequest) ProtoMessage() {} + +func (m *ListenRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ListenRequest) GetBacklog() int32 { + if m != nil && m.Backlog != nil { + return *m.Backlog + } + return 0 +} + +type ListenReply struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListenReply) Reset() { *m = ListenReply{} } +func (m *ListenReply) String() string { return proto.CompactTextString(m) } +func (*ListenReply) ProtoMessage() {} + +type AcceptRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AcceptRequest) Reset() { *m = AcceptRequest{} } +func (m *AcceptRequest) String() string { return proto.CompactTextString(m) } +func (*AcceptRequest) ProtoMessage() {} + +const Default_AcceptRequest_TimeoutSeconds float64 = -1 + +func (m *AcceptRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *AcceptRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_AcceptRequest_TimeoutSeconds +} + +type AcceptReply struct { + NewSocketDescriptor []byte `protobuf:"bytes,2,opt,name=new_socket_descriptor" json:"new_socket_descriptor,omitempty"` + RemoteAddress *AddressPort 
`protobuf:"bytes,3,opt,name=remote_address" json:"remote_address,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AcceptReply) Reset() { *m = AcceptReply{} } +func (m *AcceptReply) String() string { return proto.CompactTextString(m) } +func (*AcceptReply) ProtoMessage() {} + +func (m *AcceptReply) GetNewSocketDescriptor() []byte { + if m != nil { + return m.NewSocketDescriptor + } + return nil +} + +func (m *AcceptReply) GetRemoteAddress() *AddressPort { + if m != nil { + return m.RemoteAddress + } + return nil +} + +type ShutDownRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + How *ShutDownRequest_How `protobuf:"varint,2,req,name=how,enum=appengine.ShutDownRequest_How" json:"how,omitempty"` + SendOffset *int64 `protobuf:"varint,3,req,name=send_offset" json:"send_offset,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ShutDownRequest) Reset() { *m = ShutDownRequest{} } +func (m *ShutDownRequest) String() string { return proto.CompactTextString(m) } +func (*ShutDownRequest) ProtoMessage() {} + +func (m *ShutDownRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ShutDownRequest) GetHow() ShutDownRequest_How { + if m != nil && m.How != nil { + return *m.How + } + return ShutDownRequest_SOCKET_SHUT_RD +} + +func (m *ShutDownRequest) GetSendOffset() int64 { + if m != nil && m.SendOffset != nil { + return *m.SendOffset + } + return 0 +} + +type ShutDownReply struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ShutDownReply) Reset() { *m = ShutDownReply{} } +func (m *ShutDownReply) String() string { return proto.CompactTextString(m) } +func (*ShutDownReply) ProtoMessage() {} + +type CloseRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + SendOffset *int64 `protobuf:"varint,2,opt,name=send_offset,def=-1" json:"send_offset,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CloseRequest) Reset() { *m = CloseRequest{} } +func (m *CloseRequest) String() string { return proto.CompactTextString(m) } +func (*CloseRequest) ProtoMessage() {} + +const Default_CloseRequest_SendOffset int64 = -1 + +func (m *CloseRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *CloseRequest) GetSendOffset() int64 { + if m != nil && m.SendOffset != nil { + return *m.SendOffset + } + return Default_CloseRequest_SendOffset +} + +type CloseReply struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *CloseReply) Reset() { *m = CloseReply{} } +func (m *CloseReply) String() string { return proto.CompactTextString(m) } +func (*CloseReply) ProtoMessage() {} + +type SendRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + Data []byte `protobuf:"bytes,2,req,name=data" json:"data,omitempty"` + StreamOffset *int64 `protobuf:"varint,3,req,name=stream_offset" json:"stream_offset,omitempty"` + Flags *int32 `protobuf:"varint,4,opt,name=flags,def=0" json:"flags,omitempty"` + SendTo *AddressPort `protobuf:"bytes,5,opt,name=send_to" json:"send_to,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,6,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SendRequest) Reset() { *m = SendRequest{} } +func (m 
*SendRequest) String() string { return proto.CompactTextString(m) } +func (*SendRequest) ProtoMessage() {} + +const Default_SendRequest_Flags int32 = 0 +const Default_SendRequest_TimeoutSeconds float64 = -1 + +func (m *SendRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *SendRequest) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *SendRequest) GetStreamOffset() int64 { + if m != nil && m.StreamOffset != nil { + return *m.StreamOffset + } + return 0 +} + +func (m *SendRequest) GetFlags() int32 { + if m != nil && m.Flags != nil { + return *m.Flags + } + return Default_SendRequest_Flags +} + +func (m *SendRequest) GetSendTo() *AddressPort { + if m != nil { + return m.SendTo + } + return nil +} + +func (m *SendRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_SendRequest_TimeoutSeconds +} + +type SendReply struct { + DataSent *int32 `protobuf:"varint,1,opt,name=data_sent" json:"data_sent,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SendReply) Reset() { *m = SendReply{} } +func (m *SendReply) String() string { return proto.CompactTextString(m) } +func (*SendReply) ProtoMessage() {} + +func (m *SendReply) GetDataSent() int32 { + if m != nil && m.DataSent != nil { + return *m.DataSent + } + return 0 +} + +type ReceiveRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + DataSize *int32 `protobuf:"varint,2,req,name=data_size" json:"data_size,omitempty"` + Flags *int32 `protobuf:"varint,3,opt,name=flags,def=0" json:"flags,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,5,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReceiveRequest) Reset() { *m = ReceiveRequest{} } +func (m *ReceiveRequest) String() string { return proto.CompactTextString(m) } +func (*ReceiveRequest) ProtoMessage() {} + +const Default_ReceiveRequest_Flags int32 = 0 +const Default_ReceiveRequest_TimeoutSeconds float64 = -1 + +func (m *ReceiveRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ReceiveRequest) GetDataSize() int32 { + if m != nil && m.DataSize != nil { + return *m.DataSize + } + return 0 +} + +func (m *ReceiveRequest) GetFlags() int32 { + if m != nil && m.Flags != nil { + return *m.Flags + } + return Default_ReceiveRequest_Flags +} + +func (m *ReceiveRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_ReceiveRequest_TimeoutSeconds +} + +type ReceiveReply struct { + StreamOffset *int64 `protobuf:"varint,2,opt,name=stream_offset" json:"stream_offset,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` + ReceivedFrom *AddressPort `protobuf:"bytes,4,opt,name=received_from" json:"received_from,omitempty"` + BufferSize *int32 `protobuf:"varint,5,opt,name=buffer_size" json:"buffer_size,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReceiveReply) Reset() { *m = ReceiveReply{} } +func (m *ReceiveReply) String() string { return proto.CompactTextString(m) } +func (*ReceiveReply) ProtoMessage() {} + +func (m *ReceiveReply) GetStreamOffset() int64 { + if m != nil && m.StreamOffset != nil { + return *m.StreamOffset + } + return 0 +} + 
+func (m *ReceiveReply) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *ReceiveReply) GetReceivedFrom() *AddressPort { + if m != nil { + return m.ReceivedFrom + } + return nil +} + +func (m *ReceiveReply) GetBufferSize() int32 { + if m != nil && m.BufferSize != nil { + return *m.BufferSize + } + return 0 +} + +type PollEvent struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + RequestedEvents *int32 `protobuf:"varint,2,req,name=requested_events" json:"requested_events,omitempty"` + ObservedEvents *int32 `protobuf:"varint,3,req,name=observed_events" json:"observed_events,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PollEvent) Reset() { *m = PollEvent{} } +func (m *PollEvent) String() string { return proto.CompactTextString(m) } +func (*PollEvent) ProtoMessage() {} + +func (m *PollEvent) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *PollEvent) GetRequestedEvents() int32 { + if m != nil && m.RequestedEvents != nil { + return *m.RequestedEvents + } + return 0 +} + +func (m *PollEvent) GetObservedEvents() int32 { + if m != nil && m.ObservedEvents != nil { + return *m.ObservedEvents + } + return 0 +} + +type PollRequest struct { + Events []*PollEvent `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PollRequest) Reset() { *m = PollRequest{} } +func (m *PollRequest) String() string { return proto.CompactTextString(m) } +func (*PollRequest) ProtoMessage() {} + +const Default_PollRequest_TimeoutSeconds float64 = -1 + +func (m *PollRequest) GetEvents() []*PollEvent { + if m != nil { + return m.Events + } + return nil +} + +func (m *PollRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_PollRequest_TimeoutSeconds +} + +type PollReply struct { + Events []*PollEvent `protobuf:"bytes,2,rep,name=events" json:"events,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PollReply) Reset() { *m = PollReply{} } +func (m *PollReply) String() string { return proto.CompactTextString(m) } +func (*PollReply) ProtoMessage() {} + +func (m *PollReply) GetEvents() []*PollEvent { + if m != nil { + return m.Events + } + return nil +} + +type ResolveRequest struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + AddressFamilies []CreateSocketRequest_SocketFamily `protobuf:"varint,2,rep,name=address_families,enum=appengine.CreateSocketRequest_SocketFamily" json:"address_families,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ResolveRequest) Reset() { *m = ResolveRequest{} } +func (m *ResolveRequest) String() string { return proto.CompactTextString(m) } +func (*ResolveRequest) ProtoMessage() {} + +func (m *ResolveRequest) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ResolveRequest) GetAddressFamilies() []CreateSocketRequest_SocketFamily { + if m != nil { + return m.AddressFamilies + } + return nil +} + +type ResolveReply struct { + PackedAddress [][]byte `protobuf:"bytes,2,rep,name=packed_address" json:"packed_address,omitempty"` + CanonicalName *string `protobuf:"bytes,3,opt,name=canonical_name" json:"canonical_name,omitempty"` + Aliases []string 
`protobuf:"bytes,4,rep,name=aliases" json:"aliases,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ResolveReply) Reset() { *m = ResolveReply{} } +func (m *ResolveReply) String() string { return proto.CompactTextString(m) } +func (*ResolveReply) ProtoMessage() {} + +func (m *ResolveReply) GetPackedAddress() [][]byte { + if m != nil { + return m.PackedAddress + } + return nil +} + +func (m *ResolveReply) GetCanonicalName() string { + if m != nil && m.CanonicalName != nil { + return *m.CanonicalName + } + return "" +} + +func (m *ResolveReply) GetAliases() []string { + if m != nil { + return m.Aliases + } + return nil +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/socket/doc.go b/vendor/google.golang.org/appengine/socket/doc.go new file mode 100644 index 000000000..3de46df82 --- /dev/null +++ b/vendor/google.golang.org/appengine/socket/doc.go @@ -0,0 +1,10 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package socket provides outbound network sockets. +// +// This package is only required in the classic App Engine environment. +// Applications running only in App Engine "flexible environment" should +// use the standard library's net package. +package socket diff --git a/vendor/google.golang.org/appengine/socket/socket_classic.go b/vendor/google.golang.org/appengine/socket/socket_classic.go new file mode 100644 index 000000000..0ad50e2d3 --- /dev/null +++ b/vendor/google.golang.org/appengine/socket/socket_classic.go @@ -0,0 +1,290 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build appengine + +package socket + +import ( + "fmt" + "io" + "net" + "strconv" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/appengine/internal" + + pb "google.golang.org/appengine/internal/socket" +) + +// Dial connects to the address addr on the network protocol. +// The address format is host:port, where host may be a hostname or an IP address. +// Known protocols are "tcp" and "udp". +// The returned connection satisfies net.Conn, and is valid while ctx is valid; +// if the connection is to be used after ctx becomes invalid, invoke SetContext +// with the new context. +func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { + return DialTimeout(ctx, protocol, addr, 0) +} + +var ipFamilies = []pb.CreateSocketRequest_SocketFamily{ + pb.CreateSocketRequest_IPv4, + pb.CreateSocketRequest_IPv6, +} + +// DialTimeout is like Dial but takes a timeout. +// The timeout includes name resolution, if required. +func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { + dialCtx := ctx // Used for dialing and name resolution, but not stored in the *Conn. 
+ if timeout > 0 { + var cancel context.CancelFunc + dialCtx, cancel = context.WithTimeout(ctx, timeout) + defer cancel() + } + + host, portStr, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + port, err := strconv.Atoi(portStr) + if err != nil { + return nil, fmt.Errorf("socket: bad port %q: %v", portStr, err) + } + + var prot pb.CreateSocketRequest_SocketProtocol + switch protocol { + case "tcp": + prot = pb.CreateSocketRequest_TCP + case "udp": + prot = pb.CreateSocketRequest_UDP + default: + return nil, fmt.Errorf("socket: unknown protocol %q", protocol) + } + + packedAddrs, resolved, err := resolve(dialCtx, ipFamilies, host) + if err != nil { + return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) + } + if len(packedAddrs) == 0 { + return nil, fmt.Errorf("no addresses for %q", host) + } + + packedAddr := packedAddrs[0] // use first address + fam := pb.CreateSocketRequest_IPv4 + if len(packedAddr) == net.IPv6len { + fam = pb.CreateSocketRequest_IPv6 + } + + req := &pb.CreateSocketRequest{ + Family: fam.Enum(), + Protocol: prot.Enum(), + RemoteIp: &pb.AddressPort{ + Port: proto.Int32(int32(port)), + PackedAddress: packedAddr, + }, + } + if resolved { + req.RemoteIp.HostnameHint = &host + } + res := &pb.CreateSocketReply{} + if err := internal.Call(dialCtx, "remote_socket", "CreateSocket", req, res); err != nil { + return nil, err + } + + return &Conn{ + ctx: ctx, + desc: res.GetSocketDescriptor(), + prot: prot, + local: res.ProxyExternalIp, + remote: req.RemoteIp, + }, nil +} + +// LookupIP returns the given host's IP addresses. +func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { + packedAddrs, _, err := resolve(ctx, ipFamilies, host) + if err != nil { + return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) + } + addrs = make([]net.IP, len(packedAddrs)) + for i, pa := range packedAddrs { + addrs[i] = net.IP(pa) + } + return addrs, nil +} + +func resolve(ctx context.Context, fams []pb.CreateSocketRequest_SocketFamily, host string) ([][]byte, bool, error) { + // Check if it's an IP address. + if ip := net.ParseIP(host); ip != nil { + if ip := ip.To4(); ip != nil { + return [][]byte{ip}, false, nil + } + return [][]byte{ip}, false, nil + } + + req := &pb.ResolveRequest{ + Name: &host, + AddressFamilies: fams, + } + res := &pb.ResolveReply{} + if err := internal.Call(ctx, "remote_socket", "Resolve", req, res); err != nil { + // XXX: need to map to pb.ResolveReply_ErrorCode? + return nil, false, err + } + return res.PackedAddress, true, nil +} + +// withDeadline is like context.WithDeadline, except it ignores the zero deadline. +func withDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) { + if deadline.IsZero() { + return parent, func() {} + } + return context.WithDeadline(parent, deadline) +} + +// Conn represents a socket connection. +// It implements net.Conn. +type Conn struct { + ctx context.Context + desc string + offset int64 + + prot pb.CreateSocketRequest_SocketProtocol + local, remote *pb.AddressPort + + readDeadline, writeDeadline time.Time // optional +} + +// SetContext sets the context that is used by this Conn. +// It is usually used only when using a Conn that was created in a different context, +// such as when a connection is created during a warmup request but used while +// servicing a user request. 
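+//
+// A minimal illustrative sketch of that pattern (assuming conn was dialed
+// earlier, e.g. during a warmup request, that the handler imports
+// google.golang.org/appengine, and that r is the current user request):
+//
+//	ctx := appengine.NewContext(r)
+//	conn.SetContext(ctx)
+//	// Subsequent Read/Write calls on conn are issued against the new context.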
+func (cn *Conn) SetContext(ctx context.Context) { + cn.ctx = ctx +} + +func (cn *Conn) Read(b []byte) (n int, err error) { + const maxRead = 1 << 20 + if len(b) > maxRead { + b = b[:maxRead] + } + + req := &pb.ReceiveRequest{ + SocketDescriptor: &cn.desc, + DataSize: proto.Int32(int32(len(b))), + } + res := &pb.ReceiveReply{} + if !cn.readDeadline.IsZero() { + req.TimeoutSeconds = proto.Float64(cn.readDeadline.Sub(time.Now()).Seconds()) + } + ctx, cancel := withDeadline(cn.ctx, cn.readDeadline) + defer cancel() + if err := internal.Call(ctx, "remote_socket", "Receive", req, res); err != nil { + return 0, err + } + if len(res.Data) == 0 { + return 0, io.EOF + } + if len(res.Data) > len(b) { + return 0, fmt.Errorf("socket: internal error: read too much data: %d > %d", len(res.Data), len(b)) + } + return copy(b, res.Data), nil +} + +func (cn *Conn) Write(b []byte) (n int, err error) { + const lim = 1 << 20 // max per chunk + + for n < len(b) { + chunk := b[n:] + if len(chunk) > lim { + chunk = chunk[:lim] + } + + req := &pb.SendRequest{ + SocketDescriptor: &cn.desc, + Data: chunk, + StreamOffset: &cn.offset, + } + res := &pb.SendReply{} + if !cn.writeDeadline.IsZero() { + req.TimeoutSeconds = proto.Float64(cn.writeDeadline.Sub(time.Now()).Seconds()) + } + ctx, cancel := withDeadline(cn.ctx, cn.writeDeadline) + defer cancel() + if err = internal.Call(ctx, "remote_socket", "Send", req, res); err != nil { + // assume zero bytes were sent in this RPC + break + } + n += int(res.GetDataSent()) + cn.offset += int64(res.GetDataSent()) + } + + return +} + +func (cn *Conn) Close() error { + req := &pb.CloseRequest{ + SocketDescriptor: &cn.desc, + } + res := &pb.CloseReply{} + if err := internal.Call(cn.ctx, "remote_socket", "Close", req, res); err != nil { + return err + } + cn.desc = "CLOSED" + return nil +} + +func addr(prot pb.CreateSocketRequest_SocketProtocol, ap *pb.AddressPort) net.Addr { + if ap == nil { + return nil + } + switch prot { + case pb.CreateSocketRequest_TCP: + return &net.TCPAddr{ + IP: net.IP(ap.PackedAddress), + Port: int(*ap.Port), + } + case pb.CreateSocketRequest_UDP: + return &net.UDPAddr{ + IP: net.IP(ap.PackedAddress), + Port: int(*ap.Port), + } + } + panic("unknown protocol " + prot.String()) +} + +func (cn *Conn) LocalAddr() net.Addr { return addr(cn.prot, cn.local) } +func (cn *Conn) RemoteAddr() net.Addr { return addr(cn.prot, cn.remote) } + +func (cn *Conn) SetDeadline(t time.Time) error { + cn.readDeadline = t + cn.writeDeadline = t + return nil +} + +func (cn *Conn) SetReadDeadline(t time.Time) error { + cn.readDeadline = t + return nil +} + +func (cn *Conn) SetWriteDeadline(t time.Time) error { + cn.writeDeadline = t + return nil +} + +// KeepAlive signals that the connection is still in use. +// It may be called to prevent the socket being closed due to inactivity. +func (cn *Conn) KeepAlive() error { + req := &pb.GetSocketNameRequest{ + SocketDescriptor: &cn.desc, + } + res := &pb.GetSocketNameReply{} + return internal.Call(cn.ctx, "remote_socket", "GetSocketName", req, res) +} + +func init() { + internal.RegisterErrorCodeMap("remote_socket", pb.RemoteSocketServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/socket/socket_vm.go b/vendor/google.golang.org/appengine/socket/socket_vm.go new file mode 100644 index 000000000..c804169a1 --- /dev/null +++ b/vendor/google.golang.org/appengine/socket/socket_vm.go @@ -0,0 +1,64 @@ +// Copyright 2015 Google Inc. All rights reserved. 
+// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package socket + +import ( + "net" + "time" + + "golang.org/x/net/context" +) + +// Dial connects to the address addr on the network protocol. +// The address format is host:port, where host may be a hostname or an IP address. +// Known protocols are "tcp" and "udp". +// The returned connection satisfies net.Conn, and is valid while ctx is valid; +// if the connection is to be used after ctx becomes invalid, invoke SetContext +// with the new context. +func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { + conn, err := net.Dial(protocol, addr) + if err != nil { + return nil, err + } + return &Conn{conn}, nil +} + +// DialTimeout is like Dial but takes a timeout. +// The timeout includes name resolution, if required. +func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { + conn, err := net.DialTimeout(protocol, addr, timeout) + if err != nil { + return nil, err + } + return &Conn{conn}, nil +} + +// LookupIP returns the given host's IP addresses. +func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { + return net.LookupIP(host) +} + +// Conn represents a socket connection. +// It implements net.Conn. +type Conn struct { + net.Conn +} + +// SetContext sets the context that is used by this Conn. +// It is usually used only when using a Conn that was created in a different context, +// such as when a connection is created during a warmup request but used while +// servicing a user request. +func (cn *Conn) SetContext(ctx context.Context) { + // This function is not required in App Engine "flexible environment". +} + +// KeepAlive signals that the connection is still in use. +// It may be called to prevent the socket being closed due to inactivity. +func (cn *Conn) KeepAlive() error { + // This function is not required in App Engine "flexible environment". + return nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go new file mode 100644 index 000000000..9521b50e9 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go @@ -0,0 +1,54 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/annotations.proto + +package annotations // import "google.golang.org/genproto/googleapis/api/annotations" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +var E_Http = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MethodOptions)(nil), + ExtensionType: (*HttpRule)(nil), + Field: 72295728, + Name: "google.api.http", + Tag: "bytes,72295728,opt,name=http", + Filename: "google/api/annotations.proto", +} + +func init() { + proto.RegisterExtension(E_Http) +} + +func init() { + proto.RegisterFile("google/api/annotations.proto", fileDescriptor_annotations_55609bb51d80951d) +} + +var fileDescriptor_annotations_55609bb51d80951d = []byte{ + // 208 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, + 0xcf, 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0xc8, 0xea, 0x25, 0x16, 0x64, + 0x4a, 0x89, 0x22, 0xa9, 0xcc, 0x28, 0x29, 0x29, 0x80, 0x28, 0x91, 0x52, 0x80, 0x0a, 0x83, 0x79, + 0x49, 0xa5, 0x69, 0xfa, 0x29, 0xa9, 0xc5, 0xc9, 0x45, 0x99, 0x05, 0x25, 0xf9, 0x45, 0x10, 0x15, + 0x56, 0xde, 0x5c, 0x2c, 0x20, 0xf5, 0x42, 0x72, 0x7a, 0x50, 0xd3, 0x60, 0x4a, 0xf5, 0x7c, 0x53, + 0x4b, 0x32, 0xf2, 0x53, 0xfc, 0x0b, 0xc0, 0x56, 0x4a, 0x6c, 0x38, 0xb5, 0x47, 0x49, 0x81, 0x51, + 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x61, 0xad, 0x9e, 0x47, 0x49, 0x49, 0x41, 0x50, 0x69, 0x4e, 0x6a, + 0x10, 0xd8, 0x10, 0xa7, 0x3c, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x24, 0x05, 0x4e, 0x02, 0x8e, 0x08, + 0x67, 0x07, 0x80, 0x4c, 0x0e, 0x60, 0x8c, 0x72, 0x84, 0xca, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, + 0xeb, 0xe5, 0x17, 0xa5, 0xeb, 0xa7, 0xa7, 0xe6, 0x81, 0xed, 0xd5, 0x87, 0x48, 0x25, 0x16, 0x64, + 0x16, 0xa3, 0x7b, 0xda, 0x1a, 0x89, 0xbd, 0x88, 0x89, 0xc5, 0xdd, 0x31, 0xc0, 0x33, 0x89, 0x0d, + 0xac, 0xc9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xe3, 0x29, 0x19, 0x62, 0x28, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go new file mode 100644 index 000000000..1a8a27b65 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -0,0 +1,688 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/http.proto + +package annotations // import "google.golang.org/genproto/googleapis/api/annotations" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Defines the HTTP configuration for an API service. It contains a list of +// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +// to one or more HTTP REST API methods. +type Http struct { + // A list of HTTP configuration rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. 
+ Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"` + // When set to true, URL path parmeters will be fully URI-decoded except in + // cases of single segment matches in reserved expansion, where "%2F" will be + // left encoded. + // + // The default behavior is to not decode RFC 6570 reserved characters in multi + // segment matches. + FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Http) Reset() { *m = Http{} } +func (m *Http) String() string { return proto.CompactTextString(m) } +func (*Http) ProtoMessage() {} +func (*Http) Descriptor() ([]byte, []int) { + return fileDescriptor_http_e457621dddd7365b, []int{0} +} +func (m *Http) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Http.Unmarshal(m, b) +} +func (m *Http) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Http.Marshal(b, m, deterministic) +} +func (dst *Http) XXX_Merge(src proto.Message) { + xxx_messageInfo_Http.Merge(dst, src) +} +func (m *Http) XXX_Size() int { + return xxx_messageInfo_Http.Size(m) +} +func (m *Http) XXX_DiscardUnknown() { + xxx_messageInfo_Http.DiscardUnknown(m) +} + +var xxx_messageInfo_Http proto.InternalMessageInfo + +func (m *Http) GetRules() []*HttpRule { + if m != nil { + return m.Rules + } + return nil +} + +func (m *Http) GetFullyDecodeReservedExpansion() bool { + if m != nil { + return m.FullyDecodeReservedExpansion + } + return false +} + +// `HttpRule` defines the mapping of an RPC method to one or more HTTP +// REST API methods. The mapping specifies how different portions of the RPC +// request message are mapped to URL path, URL query parameters, and +// HTTP request body. The mapping is typically specified as an +// `google.api.http` annotation on the RPC method, +// see "google/api/annotations.proto" for details. +// +// The mapping consists of a field specifying the path template and +// method kind. The path template can refer to fields in the request +// message, as in the example below which describes a REST GET +// operation on a resource collection of messages: +// +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}"; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // mapped to the URL +// SubMessage sub = 2; // `sub.subfield` is url-mapped +// } +// message Message { +// string text = 1; // content of the resource +// } +// +// The same http annotation can alternatively be expressed inside the +// `GRPC API Configuration` YAML file. +// +// http: +// rules: +// - selector: .Messaging.GetMessage +// get: /v1/messages/{message_id}/{sub.subfield} +// +// This definition enables an automatic, bidrectional mapping of HTTP +// JSON to RPC. Example: +// +// HTTP | RPC +// -----|----- +// `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))` +// +// In general, not only fields but also field paths can be referenced +// from a path pattern. Fields mapped to the path pattern cannot be +// repeated and must have a primitive (non-message) type. 
+// +// Any fields in the request message which are not bound by the path +// pattern automatically become (optional) HTTP query +// parameters. Assume the following definition of the request message: +// +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http).get = "/v1/messages/{message_id}"; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // mapped to the URL +// int64 revision = 2; // becomes a parameter +// SubMessage sub = 3; // `sub.subfield` becomes a parameter +// } +// +// +// This enables a HTTP JSON to RPC mapping as below: +// +// HTTP | RPC +// -----|----- +// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))` +// +// Note that fields which are mapped to HTTP parameters must have a +// primitive type or a repeated primitive type. Message types are not +// allowed. In the case of a repeated type, the parameter can be +// repeated in the URL, as in `...?param=A¶m=B`. +// +// For HTTP method kinds which allow a request body, the `body` field +// specifies the mapping. Consider a REST update method on the +// message resource collection: +// +// +// service Messaging { +// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +// option (google.api.http) = { +// put: "/v1/messages/{message_id}" +// body: "message" +// }; +// } +// } +// message UpdateMessageRequest { +// string message_id = 1; // mapped to the URL +// Message message = 2; // mapped to the body +// } +// +// +// The following HTTP JSON to RPC mapping is enabled, where the +// representation of the JSON in the request body is determined by +// protos JSON encoding: +// +// HTTP | RPC +// -----|----- +// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })` +// +// The special name `*` can be used in the body mapping to define that +// every field not bound by the path template should be mapped to the +// request body. This enables the following alternative definition of +// the update method: +// +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// put: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } +// +// +// The following HTTP JSON to RPC mapping is enabled: +// +// HTTP | RPC +// -----|----- +// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")` +// +// Note that when using `*` in the body mapping, it is not possible to +// have HTTP parameters, as all fields not bound by the path end in +// the body. This makes this option more rarely used in practice of +// defining REST APIs. The common usage of `*` is in custom methods +// which don't use the URL at all for transferring data. +// +// It is possible to define multiple HTTP methods for one RPC by using +// the `additional_bindings` option. 
Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } +// +// +// This enables the following two alternative HTTP JSON to RPC +// mappings: +// +// HTTP | RPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` +// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")` +// +// # Rules for HTTP mapping +// +// The rules for mapping HTTP path, query parameters, and body fields +// to the request message are as follows: +// +// 1. The `body` field specifies either `*` or a field path, or is +// omitted. If omitted, it indicates there is no HTTP request body. +// 2. Leaf fields (recursive expansion of nested messages in the +// request) can be classified into three types: +// (a) Matched in the URL template. +// (b) Covered by body (if body is `*`, everything except (a) fields; +// else everything under the body field) +// (c) All other fields. +// 3. URL query parameters found in the HTTP request are mapped to (c) fields. +// 4. Any body sent with an HTTP request can contain only (b) fields. +// +// The syntax of the path template is as follows: +// +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; +// Verb = ":" LITERAL ; +// +// The syntax `*` matches a single path segment. The syntax `**` matches zero +// or more path segments, which must be the last part of the path except the +// `Verb`. The syntax `LITERAL` matches literal text in the path. +// +// The syntax `Variable` matches part of the URL path as specified by its +// template. A variable template must not contain other variables. If a variable +// matches a single path segment, its template may be omitted, e.g. `{var}` +// is equivalent to `{var=*}`. +// +// If a variable contains exactly one path segment, such as `"{var}"` or +// `"{var=*}"`, when such a variable is expanded into a URL path, all characters +// except `[-_.~0-9a-zA-Z]` are percent-encoded. Such variables show up in the +// Discovery Document as `{var}`. +// +// If a variable contains one or more path segments, such as `"{var=foo/*}"` +// or `"{var=**}"`, when such a variable is expanded into a URL path, all +// characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. Such variables +// show up in the Discovery Document as `{+var}`. +// +// NOTE: While the single segment variable matches the semantics of +// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 +// Simple String Expansion, the multi segment variable **does not** match +// RFC 6570 Reserved Expansion. The reason is that the Reserved Expansion +// does not expand special characters like `?` and `#`, which would lead +// to invalid URLs. +// +// NOTE: the field paths in variables and in the `body` must not refer to +// repeated fields or map fields. +type HttpRule struct { + // Selects methods to which this rule applies. + // + // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. + Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` + // Determines the URL pattern is matched by this rules. 
This pattern can be + // used with any of the {get|put|post|delete|patch} methods. A custom method + // can be defined using the 'custom' field. + // + // Types that are valid to be assigned to Pattern: + // *HttpRule_Get + // *HttpRule_Put + // *HttpRule_Post + // *HttpRule_Delete + // *HttpRule_Patch + // *HttpRule_Custom + Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"` + // The name of the request field whose value is mapped to the HTTP body, or + // `*` for mapping all fields not captured by the path pattern to the HTTP + // body. NOTE: the referred field must not be a repeated field and must be + // present at the top-level of request message type. + Body string `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"` + // Optional. The name of the response field whose value is mapped to the HTTP + // body of response. Other response fields are ignored. When + // not set, the response message will be used as HTTP body of response. + ResponseBody string `protobuf:"bytes,12,opt,name=response_body,json=responseBody,proto3" json:"response_body,omitempty"` + // Additional HTTP bindings for the selector. Nested bindings must + // not contain an `additional_bindings` field themselves (that is, + // the nesting may only be one level deep). + AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HttpRule) Reset() { *m = HttpRule{} } +func (m *HttpRule) String() string { return proto.CompactTextString(m) } +func (*HttpRule) ProtoMessage() {} +func (*HttpRule) Descriptor() ([]byte, []int) { + return fileDescriptor_http_e457621dddd7365b, []int{1} +} +func (m *HttpRule) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HttpRule.Unmarshal(m, b) +} +func (m *HttpRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HttpRule.Marshal(b, m, deterministic) +} +func (dst *HttpRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_HttpRule.Merge(dst, src) +} +func (m *HttpRule) XXX_Size() int { + return xxx_messageInfo_HttpRule.Size(m) +} +func (m *HttpRule) XXX_DiscardUnknown() { + xxx_messageInfo_HttpRule.DiscardUnknown(m) +} + +var xxx_messageInfo_HttpRule proto.InternalMessageInfo + +func (m *HttpRule) GetSelector() string { + if m != nil { + return m.Selector + } + return "" +} + +type isHttpRule_Pattern interface { + isHttpRule_Pattern() +} + +type HttpRule_Get struct { + Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof"` +} + +type HttpRule_Put struct { + Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof"` +} + +type HttpRule_Post struct { + Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof"` +} + +type HttpRule_Delete struct { + Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof"` +} + +type HttpRule_Patch struct { + Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof"` +} + +type HttpRule_Custom struct { + Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof"` +} + +func (*HttpRule_Get) isHttpRule_Pattern() {} + +func (*HttpRule_Put) isHttpRule_Pattern() {} + +func (*HttpRule_Post) isHttpRule_Pattern() {} + +func (*HttpRule_Delete) isHttpRule_Pattern() {} + +func (*HttpRule_Patch) isHttpRule_Pattern() {} + +func (*HttpRule_Custom) isHttpRule_Pattern() {} + +func (m *HttpRule) GetPattern() isHttpRule_Pattern { + if m != nil { + return m.Pattern + } + return nil 
+} + +func (m *HttpRule) GetGet() string { + if x, ok := m.GetPattern().(*HttpRule_Get); ok { + return x.Get + } + return "" +} + +func (m *HttpRule) GetPut() string { + if x, ok := m.GetPattern().(*HttpRule_Put); ok { + return x.Put + } + return "" +} + +func (m *HttpRule) GetPost() string { + if x, ok := m.GetPattern().(*HttpRule_Post); ok { + return x.Post + } + return "" +} + +func (m *HttpRule) GetDelete() string { + if x, ok := m.GetPattern().(*HttpRule_Delete); ok { + return x.Delete + } + return "" +} + +func (m *HttpRule) GetPatch() string { + if x, ok := m.GetPattern().(*HttpRule_Patch); ok { + return x.Patch + } + return "" +} + +func (m *HttpRule) GetCustom() *CustomHttpPattern { + if x, ok := m.GetPattern().(*HttpRule_Custom); ok { + return x.Custom + } + return nil +} + +func (m *HttpRule) GetBody() string { + if m != nil { + return m.Body + } + return "" +} + +func (m *HttpRule) GetResponseBody() string { + if m != nil { + return m.ResponseBody + } + return "" +} + +func (m *HttpRule) GetAdditionalBindings() []*HttpRule { + if m != nil { + return m.AdditionalBindings + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*HttpRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _HttpRule_OneofMarshaler, _HttpRule_OneofUnmarshaler, _HttpRule_OneofSizer, []interface{}{ + (*HttpRule_Get)(nil), + (*HttpRule_Put)(nil), + (*HttpRule_Post)(nil), + (*HttpRule_Delete)(nil), + (*HttpRule_Patch)(nil), + (*HttpRule_Custom)(nil), + } +} + +func _HttpRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*HttpRule) + // pattern + switch x := m.Pattern.(type) { + case *HttpRule_Get: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Get) + case *HttpRule_Put: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Put) + case *HttpRule_Post: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Post) + case *HttpRule_Delete: + b.EncodeVarint(5<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Delete) + case *HttpRule_Patch: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Patch) + case *HttpRule_Custom: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Custom); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("HttpRule.Pattern has unexpected type %T", x) + } + return nil +} + +func _HttpRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*HttpRule) + switch tag { + case 2: // pattern.get + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Get{x} + return true, err + case 3: // pattern.put + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Put{x} + return true, err + case 4: // pattern.post + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Post{x} + return true, err + case 5: // pattern.delete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Delete{x} + return true, err + case 6: // pattern.patch + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + 
m.Pattern = &HttpRule_Patch{x} + return true, err + case 8: // pattern.custom + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CustomHttpPattern) + err := b.DecodeMessage(msg) + m.Pattern = &HttpRule_Custom{msg} + return true, err + default: + return false, nil + } +} + +func _HttpRule_OneofSizer(msg proto.Message) (n int) { + m := msg.(*HttpRule) + // pattern + switch x := m.Pattern.(type) { + case *HttpRule_Get: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Get))) + n += len(x.Get) + case *HttpRule_Put: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Put))) + n += len(x.Put) + case *HttpRule_Post: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Post))) + n += len(x.Post) + case *HttpRule_Delete: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Delete))) + n += len(x.Delete) + case *HttpRule_Patch: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Patch))) + n += len(x.Patch) + case *HttpRule_Custom: + s := proto.Size(x.Custom) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A custom pattern is used for defining custom HTTP verb. +type CustomHttpPattern struct { + // The name of this custom HTTP verb. + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + // The path matched by this custom verb. + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} } +func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) } +func (*CustomHttpPattern) ProtoMessage() {} +func (*CustomHttpPattern) Descriptor() ([]byte, []int) { + return fileDescriptor_http_e457621dddd7365b, []int{2} +} +func (m *CustomHttpPattern) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CustomHttpPattern.Unmarshal(m, b) +} +func (m *CustomHttpPattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CustomHttpPattern.Marshal(b, m, deterministic) +} +func (dst *CustomHttpPattern) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomHttpPattern.Merge(dst, src) +} +func (m *CustomHttpPattern) XXX_Size() int { + return xxx_messageInfo_CustomHttpPattern.Size(m) +} +func (m *CustomHttpPattern) XXX_DiscardUnknown() { + xxx_messageInfo_CustomHttpPattern.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomHttpPattern proto.InternalMessageInfo + +func (m *CustomHttpPattern) GetKind() string { + if m != nil { + return m.Kind + } + return "" +} + +func (m *CustomHttpPattern) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func init() { + proto.RegisterType((*Http)(nil), "google.api.Http") + proto.RegisterType((*HttpRule)(nil), "google.api.HttpRule") + proto.RegisterType((*CustomHttpPattern)(nil), "google.api.CustomHttpPattern") +} + +func init() { proto.RegisterFile("google/api/http.proto", fileDescriptor_http_e457621dddd7365b) } + +var fileDescriptor_http_e457621dddd7365b = []byte{ + // 419 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x8e, 0xd3, 0x30, + 0x10, 0x86, 0x49, 0x9b, 0x76, 0xdb, 0xe9, 0x82, 0x84, 0x59, 0x90, 0x85, 0x40, 0x54, 0xe5, 0x52, + 0x71, 0x48, 0xa5, 0xe5, 0xc0, 0x61, 0x4f, 0x1b, 0xa8, 0x58, 0x6e, 
0x55, 0x8e, 0x5c, 0x22, 0x37, + 0x1e, 0x52, 0x83, 0xd7, 0xb6, 0xe2, 0x09, 0xa2, 0xaf, 0xc3, 0x63, 0xf1, 0x24, 0x1c, 0x91, 0x9d, + 0x84, 0x56, 0x42, 0xe2, 0x36, 0xf3, 0xff, 0x9f, 0xa7, 0x7f, 0x27, 0x03, 0x4f, 0x6b, 0x6b, 0x6b, + 0x8d, 0x1b, 0xe1, 0xd4, 0xe6, 0x40, 0xe4, 0x32, 0xd7, 0x58, 0xb2, 0x0c, 0x3a, 0x39, 0x13, 0x4e, + 0xad, 0x8e, 0x90, 0xde, 0x11, 0x39, 0xf6, 0x06, 0x26, 0x4d, 0xab, 0xd1, 0xf3, 0x64, 0x39, 0x5e, + 0x2f, 0xae, 0xaf, 0xb2, 0x13, 0x93, 0x05, 0xa0, 0x68, 0x35, 0x16, 0x1d, 0xc2, 0xb6, 0xf0, 0xea, + 0x4b, 0xab, 0xf5, 0xb1, 0x94, 0x58, 0x59, 0x89, 0x65, 0x83, 0x1e, 0x9b, 0xef, 0x28, 0x4b, 0xfc, + 0xe1, 0x84, 0xf1, 0xca, 0x1a, 0x3e, 0x5a, 0x26, 0xeb, 0x59, 0xf1, 0x22, 0x62, 0x1f, 0x22, 0x55, + 0xf4, 0xd0, 0x76, 0x60, 0x56, 0xbf, 0x46, 0x30, 0x1b, 0x46, 0xb3, 0xe7, 0x30, 0xf3, 0xa8, 0xb1, + 0x22, 0xdb, 0xf0, 0x64, 0x99, 0xac, 0xe7, 0xc5, 0xdf, 0x9e, 0x31, 0x18, 0xd7, 0x48, 0x71, 0xe6, + 0xfc, 0xee, 0x41, 0x11, 0x9a, 0xa0, 0xb9, 0x96, 0xf8, 0x78, 0xd0, 0x5c, 0x4b, 0xec, 0x0a, 0x52, + 0x67, 0x3d, 0xf1, 0xb4, 0x17, 0x63, 0xc7, 0x38, 0x4c, 0x25, 0x6a, 0x24, 0xe4, 0x93, 0x5e, 0xef, + 0x7b, 0xf6, 0x0c, 0x26, 0x4e, 0x50, 0x75, 0xe0, 0xd3, 0xde, 0xe8, 0x5a, 0xf6, 0x0e, 0xa6, 0x55, + 0xeb, 0xc9, 0xde, 0xf3, 0xd9, 0x32, 0x59, 0x2f, 0xae, 0x5f, 0x9e, 0x2f, 0xe3, 0x7d, 0x74, 0x42, + 0xee, 0x9d, 0x20, 0xc2, 0xc6, 0x84, 0x81, 0x1d, 0xce, 0x18, 0xa4, 0x7b, 0x2b, 0x8f, 0xfc, 0x22, + 0xfe, 0x81, 0x58, 0xb3, 0xd7, 0xf0, 0xb0, 0x41, 0xef, 0xac, 0xf1, 0x58, 0x46, 0xf3, 0x32, 0x9a, + 0x97, 0x83, 0x98, 0x07, 0x68, 0x0b, 0x4f, 0x84, 0x94, 0x8a, 0x94, 0x35, 0x42, 0x97, 0x7b, 0x65, + 0xa4, 0x32, 0xb5, 0xe7, 0x8b, 0xff, 0x7c, 0x0b, 0x76, 0x7a, 0x90, 0xf7, 0x7c, 0x3e, 0x87, 0x0b, + 0xd7, 0x85, 0x5a, 0xdd, 0xc0, 0xe3, 0x7f, 0x92, 0x86, 0x7c, 0xdf, 0x94, 0x91, 0xfd, 0x82, 0x63, + 0x1d, 0x34, 0x27, 0xe8, 0xd0, 0x6d, 0xb7, 0x88, 0x75, 0xfe, 0x15, 0x1e, 0x55, 0xf6, 0xfe, 0xec, + 0x67, 0xf3, 0x79, 0x1c, 0x13, 0xae, 0x67, 0x97, 0x7c, 0xbe, 0xed, 0x8d, 0xda, 0x6a, 0x61, 0xea, + 0xcc, 0x36, 0xf5, 0xa6, 0x46, 0x13, 0x6f, 0x6b, 0xd3, 0x59, 0xc2, 0x29, 0x1f, 0xaf, 0x4e, 0x18, + 0x63, 0x49, 0x84, 0x98, 0xfe, 0xe6, 0xac, 0xfe, 0x9d, 0x24, 0x3f, 0x47, 0xe9, 0xc7, 0xdb, 0xdd, + 0xa7, 0xfd, 0x34, 0xbe, 0x7b, 0xfb, 0x27, 0x00, 0x00, 0xff, 0xff, 0xae, 0xde, 0xa1, 0xd0, 0xac, + 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/kms/v1/resources.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/kms/v1/resources.pb.go new file mode 100644 index 000000000..b304b2697 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/kms/v1/resources.pb.go @@ -0,0 +1,952 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/kms/v1/resources.proto + +package kms // import "google.golang.org/genproto/googleapis/cloud/kms/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import duration "github.com/golang/protobuf/ptypes/duration" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] specifies how cryptographic operations are performed. +type ProtectionLevel int32 + +const ( + // Not specified. + ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED ProtectionLevel = 0 + // Crypto operations are performed in software. + ProtectionLevel_SOFTWARE ProtectionLevel = 1 + // Crypto operations are performed in a Hardware Security Module. + ProtectionLevel_HSM ProtectionLevel = 2 +) + +var ProtectionLevel_name = map[int32]string{ + 0: "PROTECTION_LEVEL_UNSPECIFIED", + 1: "SOFTWARE", + 2: "HSM", +} +var ProtectionLevel_value = map[string]int32{ + "PROTECTION_LEVEL_UNSPECIFIED": 0, + "SOFTWARE": 1, + "HSM": 2, +} + +func (x ProtectionLevel) String() string { + return proto.EnumName(ProtectionLevel_name, int32(x)) +} +func (ProtectionLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_resources_0cd4d0dda71f2af2, []int{0} +} + +// [CryptoKeyPurpose][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose] describes the cryptographic capabilities of a +// [CryptoKey][google.cloud.kms.v1.CryptoKey]. A given key can only be used for the operations allowed by +// its purpose. +type CryptoKey_CryptoKeyPurpose int32 + +const ( + // Not specified. + CryptoKey_CRYPTO_KEY_PURPOSE_UNSPECIFIED CryptoKey_CryptoKeyPurpose = 0 + // [CryptoKeys][google.cloud.kms.v1.CryptoKey] with this purpose may be used with + // [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt] and + // [Decrypt][google.cloud.kms.v1.KeyManagementService.Decrypt]. + CryptoKey_ENCRYPT_DECRYPT CryptoKey_CryptoKeyPurpose = 1 + // [CryptoKeys][google.cloud.kms.v1.CryptoKey] with this purpose may be used with + // [AsymmetricSign][google.cloud.kms.v1.KeyManagementService.AsymmetricSign] and + // [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]. + CryptoKey_ASYMMETRIC_SIGN CryptoKey_CryptoKeyPurpose = 5 + // [CryptoKeys][google.cloud.kms.v1.CryptoKey] with this purpose may be used with + // [AsymmetricDecrypt][google.cloud.kms.v1.KeyManagementService.AsymmetricDecrypt] and + // [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]. + CryptoKey_ASYMMETRIC_DECRYPT CryptoKey_CryptoKeyPurpose = 6 +) + +var CryptoKey_CryptoKeyPurpose_name = map[int32]string{ + 0: "CRYPTO_KEY_PURPOSE_UNSPECIFIED", + 1: "ENCRYPT_DECRYPT", + 5: "ASYMMETRIC_SIGN", + 6: "ASYMMETRIC_DECRYPT", +} +var CryptoKey_CryptoKeyPurpose_value = map[string]int32{ + "CRYPTO_KEY_PURPOSE_UNSPECIFIED": 0, + "ENCRYPT_DECRYPT": 1, + "ASYMMETRIC_SIGN": 5, + "ASYMMETRIC_DECRYPT": 6, +} + +func (x CryptoKey_CryptoKeyPurpose) String() string { + return proto.EnumName(CryptoKey_CryptoKeyPurpose_name, int32(x)) +} +func (CryptoKey_CryptoKeyPurpose) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_resources_0cd4d0dda71f2af2, []int{1, 0} +} + +// Attestion formats provided by the HSM. +type KeyOperationAttestation_AttestationFormat int32 + +const ( + KeyOperationAttestation_ATTESTATION_FORMAT_UNSPECIFIED KeyOperationAttestation_AttestationFormat = 0 + // Cavium HSM attestation compressed with gzip. Note that this format is + // defined by Cavium and subject to change at any time. 
+ KeyOperationAttestation_CAVIUM_V1_COMPRESSED KeyOperationAttestation_AttestationFormat = 3 +) + +var KeyOperationAttestation_AttestationFormat_name = map[int32]string{ + 0: "ATTESTATION_FORMAT_UNSPECIFIED", + 3: "CAVIUM_V1_COMPRESSED", +} +var KeyOperationAttestation_AttestationFormat_value = map[string]int32{ + "ATTESTATION_FORMAT_UNSPECIFIED": 0, + "CAVIUM_V1_COMPRESSED": 3, +} + +func (x KeyOperationAttestation_AttestationFormat) String() string { + return proto.EnumName(KeyOperationAttestation_AttestationFormat_name, int32(x)) +} +func (KeyOperationAttestation_AttestationFormat) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_resources_0cd4d0dda71f2af2, []int{3, 0} +} + +// The algorithm of the [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion], indicating what +// parameters must be used for each cryptographic operation. +// +// The +// [GOOGLE_SYMMETRIC_ENCRYPTION][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm.GOOGLE_SYMMETRIC_ENCRYPTION] +// algorithm is usable with [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] +// [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. +// +// Algorithms beginning with "RSA_SIGN_" are usable with [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] +// [ASYMMETRIC_SIGN][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN]. +// +// The fields in the name after "RSA_SIGN_" correspond to the following +// parameters: padding algorithm, modulus bit length, and digest algorithm. +// +// For PSS, the salt length used is equal to the length of digest +// algorithm. For example, +// [RSA_SIGN_PSS_2048_SHA256][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm.RSA_SIGN_PSS_2048_SHA256] +// will use PSS with a salt length of 256 bits or 32 bytes. +// +// Algorithms beginning with "RSA_DECRYPT_" are usable with +// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] +// [ASYMMETRIC_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_DECRYPT]. +// +// The fields in the name after "RSA_DECRYPT_" correspond to the following +// parameters: padding algorithm, modulus bit length, and digest algorithm. +// +// Algorithms beginning with "EC_SIGN_" are usable with [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] +// [ASYMMETRIC_SIGN][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN]. +// +// The fields in the name after "EC_SIGN_" correspond to the following +// parameters: elliptic curve, digest algorithm. +type CryptoKeyVersion_CryptoKeyVersionAlgorithm int32 + +const ( + // Not specified. + CryptoKeyVersion_CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED CryptoKeyVersion_CryptoKeyVersionAlgorithm = 0 + // Creates symmetric encryption keys. + CryptoKeyVersion_GOOGLE_SYMMETRIC_ENCRYPTION CryptoKeyVersion_CryptoKeyVersionAlgorithm = 1 + // RSASSA-PSS 2048 bit key with a SHA256 digest. + CryptoKeyVersion_RSA_SIGN_PSS_2048_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 2 + // RSASSA-PSS 3072 bit key with a SHA256 digest. + CryptoKeyVersion_RSA_SIGN_PSS_3072_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 3 + // RSASSA-PSS 4096 bit key with a SHA256 digest. + CryptoKeyVersion_RSA_SIGN_PSS_4096_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 4 + // RSASSA-PKCS1-v1_5 with a 2048 bit key and a SHA256 digest. + CryptoKeyVersion_RSA_SIGN_PKCS1_2048_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 5 + // RSASSA-PKCS1-v1_5 with a 3072 bit key and a SHA256 digest. 
+ CryptoKeyVersion_RSA_SIGN_PKCS1_3072_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 6 + // RSASSA-PKCS1-v1_5 with a 4096 bit key and a SHA256 digest. + CryptoKeyVersion_RSA_SIGN_PKCS1_4096_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 7 + // RSAES-OAEP 2048 bit key with a SHA256 digest. + CryptoKeyVersion_RSA_DECRYPT_OAEP_2048_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 8 + // RSAES-OAEP 3072 bit key with a SHA256 digest. + CryptoKeyVersion_RSA_DECRYPT_OAEP_3072_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 9 + // RSAES-OAEP 4096 bit key with a SHA256 digest. + CryptoKeyVersion_RSA_DECRYPT_OAEP_4096_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 10 + // ECDSA on the NIST P-256 curve with a SHA256 digest. + CryptoKeyVersion_EC_SIGN_P256_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 12 + // ECDSA on the NIST P-384 curve with a SHA384 digest. + CryptoKeyVersion_EC_SIGN_P384_SHA384 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 13 +) + +var CryptoKeyVersion_CryptoKeyVersionAlgorithm_name = map[int32]string{ + 0: "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED", + 1: "GOOGLE_SYMMETRIC_ENCRYPTION", + 2: "RSA_SIGN_PSS_2048_SHA256", + 3: "RSA_SIGN_PSS_3072_SHA256", + 4: "RSA_SIGN_PSS_4096_SHA256", + 5: "RSA_SIGN_PKCS1_2048_SHA256", + 6: "RSA_SIGN_PKCS1_3072_SHA256", + 7: "RSA_SIGN_PKCS1_4096_SHA256", + 8: "RSA_DECRYPT_OAEP_2048_SHA256", + 9: "RSA_DECRYPT_OAEP_3072_SHA256", + 10: "RSA_DECRYPT_OAEP_4096_SHA256", + 12: "EC_SIGN_P256_SHA256", + 13: "EC_SIGN_P384_SHA384", +} +var CryptoKeyVersion_CryptoKeyVersionAlgorithm_value = map[string]int32{ + "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED": 0, + "GOOGLE_SYMMETRIC_ENCRYPTION": 1, + "RSA_SIGN_PSS_2048_SHA256": 2, + "RSA_SIGN_PSS_3072_SHA256": 3, + "RSA_SIGN_PSS_4096_SHA256": 4, + "RSA_SIGN_PKCS1_2048_SHA256": 5, + "RSA_SIGN_PKCS1_3072_SHA256": 6, + "RSA_SIGN_PKCS1_4096_SHA256": 7, + "RSA_DECRYPT_OAEP_2048_SHA256": 8, + "RSA_DECRYPT_OAEP_3072_SHA256": 9, + "RSA_DECRYPT_OAEP_4096_SHA256": 10, + "EC_SIGN_P256_SHA256": 12, + "EC_SIGN_P384_SHA384": 13, +} + +func (x CryptoKeyVersion_CryptoKeyVersionAlgorithm) String() string { + return proto.EnumName(CryptoKeyVersion_CryptoKeyVersionAlgorithm_name, int32(x)) +} +func (CryptoKeyVersion_CryptoKeyVersionAlgorithm) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_resources_0cd4d0dda71f2af2, []int{4, 0} +} + +// The state of a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion], indicating if it can be used. +type CryptoKeyVersion_CryptoKeyVersionState int32 + +const ( + // Not specified. + CryptoKeyVersion_CRYPTO_KEY_VERSION_STATE_UNSPECIFIED CryptoKeyVersion_CryptoKeyVersionState = 0 + // This version is still being generated. It may not be used, enabled, + // disabled, or destroyed yet. Cloud KMS will automatically mark this + // version [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED] as soon as the version is ready. + CryptoKeyVersion_PENDING_GENERATION CryptoKeyVersion_CryptoKeyVersionState = 5 + // This version may be used for cryptographic operations. + CryptoKeyVersion_ENABLED CryptoKeyVersion_CryptoKeyVersionState = 1 + // This version may not be used, but the key material is still available, + // and the version can be placed back into the [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED] state. + CryptoKeyVersion_DISABLED CryptoKeyVersion_CryptoKeyVersionState = 2 + // This version is destroyed, and the key material is no longer stored. + // A version may not leave this state once entered. 
+ CryptoKeyVersion_DESTROYED CryptoKeyVersion_CryptoKeyVersionState = 3 + // This version is scheduled for destruction, and will be destroyed soon. + // Call + // [RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion] + // to put it back into the [DISABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DISABLED] state. + CryptoKeyVersion_DESTROY_SCHEDULED CryptoKeyVersion_CryptoKeyVersionState = 4 +) + +var CryptoKeyVersion_CryptoKeyVersionState_name = map[int32]string{ + 0: "CRYPTO_KEY_VERSION_STATE_UNSPECIFIED", + 5: "PENDING_GENERATION", + 1: "ENABLED", + 2: "DISABLED", + 3: "DESTROYED", + 4: "DESTROY_SCHEDULED", +} +var CryptoKeyVersion_CryptoKeyVersionState_value = map[string]int32{ + "CRYPTO_KEY_VERSION_STATE_UNSPECIFIED": 0, + "PENDING_GENERATION": 5, + "ENABLED": 1, + "DISABLED": 2, + "DESTROYED": 3, + "DESTROY_SCHEDULED": 4, +} + +func (x CryptoKeyVersion_CryptoKeyVersionState) String() string { + return proto.EnumName(CryptoKeyVersion_CryptoKeyVersionState_name, int32(x)) +} +func (CryptoKeyVersion_CryptoKeyVersionState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_resources_0cd4d0dda71f2af2, []int{4, 1} +} + +// A view for [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]s. Controls the level of detail returned +// for [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] in +// [KeyManagementService.ListCryptoKeyVersions][google.cloud.kms.v1.KeyManagementService.ListCryptoKeyVersions] and +// [KeyManagementService.ListCryptoKeys][google.cloud.kms.v1.KeyManagementService.ListCryptoKeys]. +type CryptoKeyVersion_CryptoKeyVersionView int32 + +const ( + // Default view for each [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. Does not include + // the [attestation][google.cloud.kms.v1.CryptoKeyVersion.attestation] field. + CryptoKeyVersion_CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED CryptoKeyVersion_CryptoKeyVersionView = 0 + // Provides all fields in each [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion], including the + // [attestation][google.cloud.kms.v1.CryptoKeyVersion.attestation]. + CryptoKeyVersion_FULL CryptoKeyVersion_CryptoKeyVersionView = 1 +) + +var CryptoKeyVersion_CryptoKeyVersionView_name = map[int32]string{ + 0: "CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED", + 1: "FULL", +} +var CryptoKeyVersion_CryptoKeyVersionView_value = map[string]int32{ + "CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED": 0, + "FULL": 1, +} + +func (x CryptoKeyVersion_CryptoKeyVersionView) String() string { + return proto.EnumName(CryptoKeyVersion_CryptoKeyVersionView_name, int32(x)) +} +func (CryptoKeyVersion_CryptoKeyVersionView) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_resources_0cd4d0dda71f2af2, []int{4, 2} +} + +// A [KeyRing][google.cloud.kms.v1.KeyRing] is a toplevel logical grouping of [CryptoKeys][google.cloud.kms.v1.CryptoKey]. +type KeyRing struct { + // Output only. The resource name for the [KeyRing][google.cloud.kms.v1.KeyRing] in the format + // `projects/*/locations/*/keyRings/*`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Output only. The time at which this [KeyRing][google.cloud.kms.v1.KeyRing] was created. 
+ CreateTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyRing) Reset() { *m = KeyRing{} } +func (m *KeyRing) String() string { return proto.CompactTextString(m) } +func (*KeyRing) ProtoMessage() {} +func (*KeyRing) Descriptor() ([]byte, []int) { + return fileDescriptor_resources_0cd4d0dda71f2af2, []int{0} +} +func (m *KeyRing) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyRing.Unmarshal(m, b) +} +func (m *KeyRing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyRing.Marshal(b, m, deterministic) +} +func (dst *KeyRing) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyRing.Merge(dst, src) +} +func (m *KeyRing) XXX_Size() int { + return xxx_messageInfo_KeyRing.Size(m) +} +func (m *KeyRing) XXX_DiscardUnknown() { + xxx_messageInfo_KeyRing.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyRing proto.InternalMessageInfo + +func (m *KeyRing) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *KeyRing) GetCreateTime() *timestamp.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +// A [CryptoKey][google.cloud.kms.v1.CryptoKey] represents a logical key that can be used for cryptographic +// operations. +// +// A [CryptoKey][google.cloud.kms.v1.CryptoKey] is made up of one or more [versions][google.cloud.kms.v1.CryptoKeyVersion], which +// represent the actual key material used in cryptographic operations. +type CryptoKey struct { + // Output only. The resource name for this [CryptoKey][google.cloud.kms.v1.CryptoKey] in the format + // `projects/*/locations/*/keyRings/*/cryptoKeys/*`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Output only. A copy of the "primary" [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] that will be used + // by [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt] when this [CryptoKey][google.cloud.kms.v1.CryptoKey] is given + // in [EncryptRequest.name][google.cloud.kms.v1.EncryptRequest.name]. + // + // The [CryptoKey][google.cloud.kms.v1.CryptoKey]'s primary version can be updated via + // [UpdateCryptoKeyPrimaryVersion][google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyPrimaryVersion]. + // + // All keys with [purpose][google.cloud.kms.v1.CryptoKey.purpose] + // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT] have a + // primary. For other keys, this field will be omitted. + Primary *CryptoKeyVersion `protobuf:"bytes,2,opt,name=primary,proto3" json:"primary,omitempty"` + // The immutable purpose of this [CryptoKey][google.cloud.kms.v1.CryptoKey]. + Purpose CryptoKey_CryptoKeyPurpose `protobuf:"varint,3,opt,name=purpose,proto3,enum=google.cloud.kms.v1.CryptoKey_CryptoKeyPurpose" json:"purpose,omitempty"` + // Output only. The time at which this [CryptoKey][google.cloud.kms.v1.CryptoKey] was created. + CreateTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + // At [next_rotation_time][google.cloud.kms.v1.CryptoKey.next_rotation_time], the Key Management Service will automatically: + // + // 1. Create a new version of this [CryptoKey][google.cloud.kms.v1.CryptoKey]. + // 2. Mark the new version as primary. 
+ // + // Key rotations performed manually via + // [CreateCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion] and + // [UpdateCryptoKeyPrimaryVersion][google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyPrimaryVersion] + // do not affect [next_rotation_time][google.cloud.kms.v1.CryptoKey.next_rotation_time]. + // + // Keys with [purpose][google.cloud.kms.v1.CryptoKey.purpose] + // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT] support + // automatic rotation. For other keys, this field must be omitted. + NextRotationTime *timestamp.Timestamp `protobuf:"bytes,7,opt,name=next_rotation_time,json=nextRotationTime,proto3" json:"next_rotation_time,omitempty"` + // Controls the rate of automatic rotation. + // + // Types that are valid to be assigned to RotationSchedule: + // *CryptoKey_RotationPeriod + RotationSchedule isCryptoKey_RotationSchedule `protobuf_oneof:"rotation_schedule"` + // A template describing settings for new [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] instances. + // The properties of new [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] instances created by either + // [CreateCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion] or + // auto-rotation are controlled by this template. + VersionTemplate *CryptoKeyVersionTemplate `protobuf:"bytes,11,opt,name=version_template,json=versionTemplate,proto3" json:"version_template,omitempty"` + // Labels with user-defined metadata. For more information, see + // [Labeling Keys](/kms/docs/labeling-keys). + Labels map[string]string `protobuf:"bytes,10,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CryptoKey) Reset() { *m = CryptoKey{} } +func (m *CryptoKey) String() string { return proto.CompactTextString(m) } +func (*CryptoKey) ProtoMessage() {} +func (*CryptoKey) Descriptor() ([]byte, []int) { + return fileDescriptor_resources_0cd4d0dda71f2af2, []int{1} +} +func (m *CryptoKey) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CryptoKey.Unmarshal(m, b) +} +func (m *CryptoKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CryptoKey.Marshal(b, m, deterministic) +} +func (dst *CryptoKey) XXX_Merge(src proto.Message) { + xxx_messageInfo_CryptoKey.Merge(dst, src) +} +func (m *CryptoKey) XXX_Size() int { + return xxx_messageInfo_CryptoKey.Size(m) +} +func (m *CryptoKey) XXX_DiscardUnknown() { + xxx_messageInfo_CryptoKey.DiscardUnknown(m) +} + +var xxx_messageInfo_CryptoKey proto.InternalMessageInfo + +func (m *CryptoKey) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CryptoKey) GetPrimary() *CryptoKeyVersion { + if m != nil { + return m.Primary + } + return nil +} + +func (m *CryptoKey) GetPurpose() CryptoKey_CryptoKeyPurpose { + if m != nil { + return m.Purpose + } + return CryptoKey_CRYPTO_KEY_PURPOSE_UNSPECIFIED +} + +func (m *CryptoKey) GetCreateTime() *timestamp.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *CryptoKey) GetNextRotationTime() *timestamp.Timestamp { + if m != nil { + return m.NextRotationTime + } + return nil +} + +type isCryptoKey_RotationSchedule interface { + isCryptoKey_RotationSchedule() +} + +type CryptoKey_RotationPeriod struct { + RotationPeriod *duration.Duration 
`protobuf:"bytes,8,opt,name=rotation_period,json=rotationPeriod,proto3,oneof"` +} + +func (*CryptoKey_RotationPeriod) isCryptoKey_RotationSchedule() {} + +func (m *CryptoKey) GetRotationSchedule() isCryptoKey_RotationSchedule { + if m != nil { + return m.RotationSchedule + } + return nil +} + +func (m *CryptoKey) GetRotationPeriod() *duration.Duration { + if x, ok := m.GetRotationSchedule().(*CryptoKey_RotationPeriod); ok { + return x.RotationPeriod + } + return nil +} + +func (m *CryptoKey) GetVersionTemplate() *CryptoKeyVersionTemplate { + if m != nil { + return m.VersionTemplate + } + return nil +} + +func (m *CryptoKey) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*CryptoKey) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _CryptoKey_OneofMarshaler, _CryptoKey_OneofUnmarshaler, _CryptoKey_OneofSizer, []interface{}{ + (*CryptoKey_RotationPeriod)(nil), + } +} + +func _CryptoKey_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*CryptoKey) + // rotation_schedule + switch x := m.RotationSchedule.(type) { + case *CryptoKey_RotationPeriod: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RotationPeriod); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("CryptoKey.RotationSchedule has unexpected type %T", x) + } + return nil +} + +func _CryptoKey_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*CryptoKey) + switch tag { + case 8: // rotation_schedule.rotation_period + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(duration.Duration) + err := b.DecodeMessage(msg) + m.RotationSchedule = &CryptoKey_RotationPeriod{msg} + return true, err + default: + return false, nil + } +} + +func _CryptoKey_OneofSizer(msg proto.Message) (n int) { + m := msg.(*CryptoKey) + // rotation_schedule + switch x := m.RotationSchedule.(type) { + case *CryptoKey_RotationPeriod: + s := proto.Size(x.RotationPeriod) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A [CryptoKeyVersionTemplate][google.cloud.kms.v1.CryptoKeyVersionTemplate] specifies the properties to use when creating +// a new [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion], either manually with +// [CreateCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion] or +// automatically as a result of auto-rotation. +type CryptoKeyVersionTemplate struct { + // [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] to use when creating a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] based on + // this template. Immutable. Defaults to [SOFTWARE][google.cloud.kms.v1.ProtectionLevel.SOFTWARE]. + ProtectionLevel ProtectionLevel `protobuf:"varint,1,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` + // Required. [Algorithm][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm] to use + // when creating a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] based on this template. 
+ // + // For backwards compatibility, GOOGLE_SYMMETRIC_ENCRYPTION is implied if both + // this field is omitted and [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] is + // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. + Algorithm CryptoKeyVersion_CryptoKeyVersionAlgorithm `protobuf:"varint,3,opt,name=algorithm,proto3,enum=google.cloud.kms.v1.CryptoKeyVersion_CryptoKeyVersionAlgorithm" json:"algorithm,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CryptoKeyVersionTemplate) Reset() { *m = CryptoKeyVersionTemplate{} } +func (m *CryptoKeyVersionTemplate) String() string { return proto.CompactTextString(m) } +func (*CryptoKeyVersionTemplate) ProtoMessage() {} +func (*CryptoKeyVersionTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_resources_0cd4d0dda71f2af2, []int{2} +} +func (m *CryptoKeyVersionTemplate) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CryptoKeyVersionTemplate.Unmarshal(m, b) +} +func (m *CryptoKeyVersionTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CryptoKeyVersionTemplate.Marshal(b, m, deterministic) +} +func (dst *CryptoKeyVersionTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_CryptoKeyVersionTemplate.Merge(dst, src) +} +func (m *CryptoKeyVersionTemplate) XXX_Size() int { + return xxx_messageInfo_CryptoKeyVersionTemplate.Size(m) +} +func (m *CryptoKeyVersionTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_CryptoKeyVersionTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_CryptoKeyVersionTemplate proto.InternalMessageInfo + +func (m *CryptoKeyVersionTemplate) GetProtectionLevel() ProtectionLevel { + if m != nil { + return m.ProtectionLevel + } + return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED +} + +func (m *CryptoKeyVersionTemplate) GetAlgorithm() CryptoKeyVersion_CryptoKeyVersionAlgorithm { + if m != nil { + return m.Algorithm + } + return CryptoKeyVersion_CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED +} + +// Contains an HSM-generated attestation about a key operation. +type KeyOperationAttestation struct { + // Output only. The format of the attestation data. + Format KeyOperationAttestation_AttestationFormat `protobuf:"varint,4,opt,name=format,proto3,enum=google.cloud.kms.v1.KeyOperationAttestation_AttestationFormat" json:"format,omitempty"` + // Output only. The attestation data provided by the HSM when the key + // operation was performed. 
+ Content []byte `protobuf:"bytes,5,opt,name=content,proto3" json:"content,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyOperationAttestation) Reset() { *m = KeyOperationAttestation{} } +func (m *KeyOperationAttestation) String() string { return proto.CompactTextString(m) } +func (*KeyOperationAttestation) ProtoMessage() {} +func (*KeyOperationAttestation) Descriptor() ([]byte, []int) { + return fileDescriptor_resources_0cd4d0dda71f2af2, []int{3} +} +func (m *KeyOperationAttestation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyOperationAttestation.Unmarshal(m, b) +} +func (m *KeyOperationAttestation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyOperationAttestation.Marshal(b, m, deterministic) +} +func (dst *KeyOperationAttestation) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyOperationAttestation.Merge(dst, src) +} +func (m *KeyOperationAttestation) XXX_Size() int { + return xxx_messageInfo_KeyOperationAttestation.Size(m) +} +func (m *KeyOperationAttestation) XXX_DiscardUnknown() { + xxx_messageInfo_KeyOperationAttestation.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyOperationAttestation proto.InternalMessageInfo + +func (m *KeyOperationAttestation) GetFormat() KeyOperationAttestation_AttestationFormat { + if m != nil { + return m.Format + } + return KeyOperationAttestation_ATTESTATION_FORMAT_UNSPECIFIED +} + +func (m *KeyOperationAttestation) GetContent() []byte { + if m != nil { + return m.Content + } + return nil +} + +// A [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] represents an individual cryptographic key, and the +// associated key material. +// +// An [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED] version can be +// used for cryptographic operations. +// +// For security reasons, the raw cryptographic key material represented by a +// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] can never be viewed or exported. It can only be used to +// encrypt, decrypt, or sign data when an authorized user or application invokes +// Cloud KMS. +type CryptoKeyVersion struct { + // Output only. The resource name for this [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in the format + // `projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The current state of the [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. + State CryptoKeyVersion_CryptoKeyVersionState `protobuf:"varint,3,opt,name=state,proto3,enum=google.cloud.kms.v1.CryptoKeyVersion_CryptoKeyVersionState" json:"state,omitempty"` + // Output only. The [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] describing how crypto operations are + // performed with this [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. + ProtectionLevel ProtectionLevel `protobuf:"varint,7,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` + // Output only. The [CryptoKeyVersionAlgorithm][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm] that this + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] supports. 
+ Algorithm CryptoKeyVersion_CryptoKeyVersionAlgorithm `protobuf:"varint,10,opt,name=algorithm,proto3,enum=google.cloud.kms.v1.CryptoKeyVersion_CryptoKeyVersionAlgorithm" json:"algorithm,omitempty"` + // Output only. Statement that was generated and signed by the HSM at key + // creation time. Use this statement to verify attributes of the key as stored + // on the HSM, independently of Google. Only provided for key versions with + // [protection_level][google.cloud.kms.v1.CryptoKeyVersion.protection_level] [HSM][google.cloud.kms.v1.ProtectionLevel.HSM]. + Attestation *KeyOperationAttestation `protobuf:"bytes,8,opt,name=attestation,proto3" json:"attestation,omitempty"` + // Output only. The time at which this [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] was created. + CreateTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + // Output only. The time this [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]'s key material was + // generated. + GenerateTime *timestamp.Timestamp `protobuf:"bytes,11,opt,name=generate_time,json=generateTime,proto3" json:"generate_time,omitempty"` + // Output only. The time this [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]'s key material is scheduled + // for destruction. Only present if [state][google.cloud.kms.v1.CryptoKeyVersion.state] is + // [DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED]. + DestroyTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=destroy_time,json=destroyTime,proto3" json:"destroy_time,omitempty"` + // Output only. The time this CryptoKeyVersion's key material was + // destroyed. Only present if [state][google.cloud.kms.v1.CryptoKeyVersion.state] is + // [DESTROYED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROYED]. 
+ DestroyEventTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=destroy_event_time,json=destroyEventTime,proto3" json:"destroy_event_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CryptoKeyVersion) Reset() { *m = CryptoKeyVersion{} } +func (m *CryptoKeyVersion) String() string { return proto.CompactTextString(m) } +func (*CryptoKeyVersion) ProtoMessage() {} +func (*CryptoKeyVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_resources_0cd4d0dda71f2af2, []int{4} +} +func (m *CryptoKeyVersion) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CryptoKeyVersion.Unmarshal(m, b) +} +func (m *CryptoKeyVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CryptoKeyVersion.Marshal(b, m, deterministic) +} +func (dst *CryptoKeyVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_CryptoKeyVersion.Merge(dst, src) +} +func (m *CryptoKeyVersion) XXX_Size() int { + return xxx_messageInfo_CryptoKeyVersion.Size(m) +} +func (m *CryptoKeyVersion) XXX_DiscardUnknown() { + xxx_messageInfo_CryptoKeyVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_CryptoKeyVersion proto.InternalMessageInfo + +func (m *CryptoKeyVersion) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CryptoKeyVersion) GetState() CryptoKeyVersion_CryptoKeyVersionState { + if m != nil { + return m.State + } + return CryptoKeyVersion_CRYPTO_KEY_VERSION_STATE_UNSPECIFIED +} + +func (m *CryptoKeyVersion) GetProtectionLevel() ProtectionLevel { + if m != nil { + return m.ProtectionLevel + } + return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED +} + +func (m *CryptoKeyVersion) GetAlgorithm() CryptoKeyVersion_CryptoKeyVersionAlgorithm { + if m != nil { + return m.Algorithm + } + return CryptoKeyVersion_CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED +} + +func (m *CryptoKeyVersion) GetAttestation() *KeyOperationAttestation { + if m != nil { + return m.Attestation + } + return nil +} + +func (m *CryptoKeyVersion) GetCreateTime() *timestamp.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *CryptoKeyVersion) GetGenerateTime() *timestamp.Timestamp { + if m != nil { + return m.GenerateTime + } + return nil +} + +func (m *CryptoKeyVersion) GetDestroyTime() *timestamp.Timestamp { + if m != nil { + return m.DestroyTime + } + return nil +} + +func (m *CryptoKeyVersion) GetDestroyEventTime() *timestamp.Timestamp { + if m != nil { + return m.DestroyEventTime + } + return nil +} + +// The public key for a given [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. Obtained via +// [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]. +type PublicKey struct { + // The public key, encoded in PEM format. For more information, see the + // [RFC 7468](https://tools.ietf.org/html/rfc7468) sections for + // [General Considerations](https://tools.ietf.org/html/rfc7468#section-2) and + // [Textual Encoding of Subject Public Key Info] + // (https://tools.ietf.org/html/rfc7468#section-13). + Pem string `protobuf:"bytes,1,opt,name=pem,proto3" json:"pem,omitempty"` + // The [Algorithm][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm] associated + // with this key. 
+ Algorithm CryptoKeyVersion_CryptoKeyVersionAlgorithm `protobuf:"varint,2,opt,name=algorithm,proto3,enum=google.cloud.kms.v1.CryptoKeyVersion_CryptoKeyVersionAlgorithm" json:"algorithm,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PublicKey) Reset() { *m = PublicKey{} } +func (m *PublicKey) String() string { return proto.CompactTextString(m) } +func (*PublicKey) ProtoMessage() {} +func (*PublicKey) Descriptor() ([]byte, []int) { + return fileDescriptor_resources_0cd4d0dda71f2af2, []int{5} +} +func (m *PublicKey) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PublicKey.Unmarshal(m, b) +} +func (m *PublicKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PublicKey.Marshal(b, m, deterministic) +} +func (dst *PublicKey) XXX_Merge(src proto.Message) { + xxx_messageInfo_PublicKey.Merge(dst, src) +} +func (m *PublicKey) XXX_Size() int { + return xxx_messageInfo_PublicKey.Size(m) +} +func (m *PublicKey) XXX_DiscardUnknown() { + xxx_messageInfo_PublicKey.DiscardUnknown(m) +} + +var xxx_messageInfo_PublicKey proto.InternalMessageInfo + +func (m *PublicKey) GetPem() string { + if m != nil { + return m.Pem + } + return "" +} + +func (m *PublicKey) GetAlgorithm() CryptoKeyVersion_CryptoKeyVersionAlgorithm { + if m != nil { + return m.Algorithm + } + return CryptoKeyVersion_CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED +} + +func init() { + proto.RegisterType((*KeyRing)(nil), "google.cloud.kms.v1.KeyRing") + proto.RegisterType((*CryptoKey)(nil), "google.cloud.kms.v1.CryptoKey") + proto.RegisterMapType((map[string]string)(nil), "google.cloud.kms.v1.CryptoKey.LabelsEntry") + proto.RegisterType((*CryptoKeyVersionTemplate)(nil), "google.cloud.kms.v1.CryptoKeyVersionTemplate") + proto.RegisterType((*KeyOperationAttestation)(nil), "google.cloud.kms.v1.KeyOperationAttestation") + proto.RegisterType((*CryptoKeyVersion)(nil), "google.cloud.kms.v1.CryptoKeyVersion") + proto.RegisterType((*PublicKey)(nil), "google.cloud.kms.v1.PublicKey") + proto.RegisterEnum("google.cloud.kms.v1.ProtectionLevel", ProtectionLevel_name, ProtectionLevel_value) + proto.RegisterEnum("google.cloud.kms.v1.CryptoKey_CryptoKeyPurpose", CryptoKey_CryptoKeyPurpose_name, CryptoKey_CryptoKeyPurpose_value) + proto.RegisterEnum("google.cloud.kms.v1.KeyOperationAttestation_AttestationFormat", KeyOperationAttestation_AttestationFormat_name, KeyOperationAttestation_AttestationFormat_value) + proto.RegisterEnum("google.cloud.kms.v1.CryptoKeyVersion_CryptoKeyVersionAlgorithm", CryptoKeyVersion_CryptoKeyVersionAlgorithm_name, CryptoKeyVersion_CryptoKeyVersionAlgorithm_value) + proto.RegisterEnum("google.cloud.kms.v1.CryptoKeyVersion_CryptoKeyVersionState", CryptoKeyVersion_CryptoKeyVersionState_name, CryptoKeyVersion_CryptoKeyVersionState_value) + proto.RegisterEnum("google.cloud.kms.v1.CryptoKeyVersion_CryptoKeyVersionView", CryptoKeyVersion_CryptoKeyVersionView_name, CryptoKeyVersion_CryptoKeyVersionView_value) +} + +func init() { + proto.RegisterFile("google/cloud/kms/v1/resources.proto", fileDescriptor_resources_0cd4d0dda71f2af2) +} + +var fileDescriptor_resources_0cd4d0dda71f2af2 = []byte{ + // 1197 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x5d, 0x6e, 0xdb, 0x46, + 0x10, 0x0e, 0x25, 0xcb, 0xb2, 0x46, 0x4e, 0x4c, 0xaf, 0xf3, 0xa3, 0xb8, 0x41, 0x62, 0x28, 0x29, + 0x6a, 0x04, 0xa9, 0x14, 0xcb, 0x4e, 0xea, 0x34, 0x68, 0x03, 0x5a, 0x5a, 0x4b, 
0xac, 0x7e, 0xc8, + 0x2c, 0x69, 0xa5, 0x0e, 0x52, 0x10, 0xb4, 0xbc, 0x51, 0x04, 0x8b, 0x3f, 0x20, 0x29, 0x35, 0x02, + 0x7a, 0x8d, 0xbe, 0xf4, 0x02, 0x05, 0x7a, 0x86, 0x9e, 0x20, 0x87, 0x28, 0x7a, 0x8c, 0x3e, 0x16, + 0x5c, 0x2e, 0x6d, 0x49, 0x66, 0xea, 0xa4, 0xf0, 0x93, 0x76, 0x67, 0xe6, 0xfb, 0x66, 0x34, 0x33, + 0xfc, 0x48, 0xb8, 0xdf, 0x77, 0x9c, 0xfe, 0x90, 0x96, 0x7b, 0x43, 0x67, 0x74, 0x5c, 0x3e, 0xb1, + 0xfc, 0xf2, 0x78, 0xab, 0xec, 0x51, 0xdf, 0x19, 0x79, 0x3d, 0xea, 0x97, 0x5c, 0xcf, 0x09, 0x1c, + 0xb4, 0x16, 0x05, 0x95, 0x58, 0x50, 0xe9, 0xc4, 0xf2, 0x4b, 0xe3, 0xad, 0xf5, 0x3b, 0x1c, 0x69, + 0xba, 0x83, 0xb2, 0x69, 0xdb, 0x4e, 0x60, 0x06, 0x03, 0xc7, 0xe6, 0x90, 0xf5, 0xbb, 0xdc, 0xcb, + 0x6e, 0x47, 0xa3, 0xb7, 0xe5, 0xe3, 0x91, 0xc7, 0x02, 0xb8, 0xff, 0xde, 0xbc, 0x3f, 0x18, 0x58, + 0xd4, 0x0f, 0x4c, 0xcb, 0x8d, 0x02, 0x8a, 0xaf, 0x21, 0xdb, 0xa4, 0x13, 0x32, 0xb0, 0xfb, 0x08, + 0xc1, 0x82, 0x6d, 0x5a, 0xb4, 0x20, 0x6c, 0x08, 0x9b, 0x39, 0xc2, 0xce, 0xe8, 0x39, 0xe4, 0x7b, + 0x1e, 0x35, 0x03, 0x6a, 0x84, 0xc0, 0x42, 0x6a, 0x43, 0xd8, 0xcc, 0x57, 0xd6, 0x4b, 0xbc, 0xd0, + 0x98, 0xb5, 0xa4, 0xc7, 0xac, 0x04, 0xa2, 0xf0, 0xd0, 0x50, 0xfc, 0x3b, 0x03, 0xb9, 0xaa, 0x37, + 0x71, 0x03, 0xa7, 0x49, 0x27, 0x89, 0xf4, 0x2f, 0x20, 0xeb, 0x7a, 0x03, 0xcb, 0xf4, 0x26, 0x9c, + 0xfa, 0xcb, 0x52, 0x42, 0x0f, 0x4a, 0xa7, 0x24, 0x5d, 0xea, 0xf9, 0x03, 0xc7, 0x26, 0x31, 0x0a, + 0xc9, 0x90, 0x75, 0x47, 0x9e, 0xeb, 0xf8, 0xb4, 0x90, 0xde, 0x10, 0x36, 0xaf, 0x55, 0xca, 0xff, + 0x4d, 0x70, 0x76, 0x52, 0x23, 0x18, 0x89, 0xf1, 0xf3, 0x7f, 0x35, 0xf3, 0x39, 0x7f, 0x15, 0x35, + 0x00, 0xd9, 0xf4, 0x7d, 0x60, 0x78, 0x7c, 0x3e, 0x11, 0x47, 0xf6, 0x42, 0x0e, 0x31, 0x44, 0x11, + 0x0e, 0x62, 0x4c, 0x35, 0x58, 0x39, 0x25, 0x71, 0xa9, 0x37, 0x70, 0x8e, 0x0b, 0x4b, 0x8c, 0xe6, + 0xf6, 0x39, 0x9a, 0x1a, 0x9f, 0x75, 0xe3, 0x0a, 0xb9, 0x16, 0x63, 0x54, 0x06, 0x41, 0x3f, 0x82, + 0x38, 0x8e, 0x7a, 0x65, 0x04, 0xd4, 0x72, 0x87, 0x66, 0x40, 0x0b, 0x79, 0x46, 0xf3, 0xf5, 0x27, + 0x75, 0x58, 0xe7, 0x20, 0xb2, 0x32, 0x9e, 0x35, 0xa0, 0x3d, 0x58, 0x1c, 0x9a, 0x47, 0x74, 0xe8, + 0x17, 0x60, 0x23, 0xbd, 0x99, 0xaf, 0x3c, 0xbc, 0xa0, 0xe1, 0x2d, 0x16, 0x8c, 0xed, 0xc0, 0x9b, + 0x10, 0x8e, 0x5c, 0x7f, 0x06, 0xf9, 0x29, 0x33, 0x12, 0x21, 0x7d, 0x42, 0x27, 0x7c, 0x31, 0xc2, + 0x23, 0xba, 0x0e, 0x99, 0xb1, 0x39, 0x1c, 0x45, 0x0b, 0x97, 0x23, 0xd1, 0xe5, 0xdb, 0xd4, 0xae, + 0x50, 0x7c, 0x0f, 0xe2, 0xfc, 0x08, 0x51, 0x11, 0xee, 0x56, 0xc9, 0xa1, 0xaa, 0x2b, 0x46, 0x13, + 0x1f, 0x1a, 0xea, 0x01, 0x51, 0x15, 0x0d, 0x1b, 0x07, 0x1d, 0x4d, 0xc5, 0x55, 0x79, 0x5f, 0xc6, + 0x35, 0xf1, 0x0a, 0x5a, 0x83, 0x15, 0xdc, 0x61, 0x51, 0x46, 0x0d, 0xb3, 0x5f, 0x51, 0x08, 0x8d, + 0x92, 0x76, 0xd8, 0x6e, 0x63, 0x9d, 0xc8, 0x55, 0x43, 0x93, 0xeb, 0x1d, 0x31, 0x83, 0x6e, 0x02, + 0x9a, 0x32, 0xc6, 0xc1, 0x8b, 0x7b, 0x6b, 0xb0, 0x7a, 0x3a, 0x18, 0xbf, 0xf7, 0x8e, 0x1e, 0x8f, + 0x86, 0xb4, 0xf8, 0x41, 0x80, 0xc2, 0xc7, 0x7a, 0x87, 0x14, 0x10, 0xc3, 0x59, 0xd1, 0x1e, 0xc3, + 0x0c, 0xe9, 0x98, 0x0e, 0xd9, 0x9f, 0xbc, 0x56, 0x79, 0x90, 0xd8, 0x34, 0xf5, 0x34, 0xb8, 0x15, + 0xc6, 0x92, 0x15, 0x77, 0xd6, 0x80, 0x7e, 0x82, 0x9c, 0x39, 0xec, 0x3b, 0xde, 0x20, 0x78, 0x67, + 0xf1, 0x7d, 0x7f, 0xf1, 0x49, 0xe3, 0x3c, 0x67, 0x90, 0x62, 0x1a, 0x72, 0xc6, 0x58, 0xfc, 0x4b, + 0x80, 0x5b, 0x4d, 0x3a, 0x51, 0x5c, 0x1a, 0xed, 0x95, 0x14, 0x04, 0xe1, 0x9a, 0x86, 0x47, 0xd4, + 0x85, 0xc5, 0xb7, 0x8e, 0x67, 0x99, 0x41, 0x61, 0x81, 0xe5, 0xfd, 0x3e, 0x31, 0xef, 0x47, 0xd0, + 0xa5, 0xa9, 0xf3, 0x3e, 0x63, 0x21, 0x9c, 0x0d, 0x15, 0x20, 0xdb, 0x73, 0xec, 0x80, 0xda, 0x01, + 0x7b, 
0xe2, 0x96, 0x49, 0x7c, 0x2d, 0xbe, 0x84, 0xd5, 0x73, 0xb0, 0x70, 0xd4, 0x92, 0xae, 0x63, + 0x4d, 0x97, 0x74, 0x59, 0xe9, 0x18, 0xfb, 0x0a, 0x69, 0x4b, 0xfa, 0xdc, 0xa8, 0x0b, 0x70, 0xbd, + 0x2a, 0x75, 0xe5, 0x83, 0xb6, 0xd1, 0xdd, 0x32, 0xaa, 0x4a, 0x5b, 0x25, 0x58, 0xd3, 0x70, 0x4d, + 0x4c, 0x17, 0x7f, 0x87, 0xa9, 0xed, 0xe1, 0x9d, 0x48, 0xd4, 0xa5, 0x97, 0x90, 0x09, 0xf3, 0xc6, + 0xa2, 0xf2, 0xfc, 0xff, 0x35, 0x59, 0x0b, 0x29, 0x48, 0xc4, 0x94, 0xb8, 0x0c, 0xd9, 0x4b, 0x5b, + 0x06, 0xb8, 0xec, 0x65, 0x40, 0x1d, 0xc8, 0x9b, 0x67, 0xed, 0xe7, 0x1a, 0xf4, 0xe8, 0x73, 0xa6, + 0x4e, 0xa6, 0x09, 0xe6, 0xe5, 0x75, 0xe1, 0xb3, 0xe4, 0xf5, 0x05, 0x5c, 0xed, 0x53, 0x3b, 0x4c, + 0xc1, 0xe1, 0xf9, 0x0b, 0xe1, 0xcb, 0x31, 0x80, 0x11, 0x7c, 0x07, 0xcb, 0xc7, 0xd4, 0x0f, 0x3c, + 0x67, 0xf2, 0xa9, 0xea, 0x9e, 0xe7, 0xf1, 0xb1, 0xbc, 0xc7, 0x70, 0x3a, 0xa6, 0x76, 0x10, 0x91, + 0x2c, 0x5e, 0x2c, 0xef, 0x1c, 0x85, 0x43, 0x10, 0x7b, 0x27, 0xfe, 0x99, 0x86, 0xdb, 0x1f, 0xed, + 0x3f, 0x7a, 0x04, 0x9b, 0x53, 0x4a, 0xd6, 0xc5, 0x44, 0x0b, 0xb7, 0x5c, 0x6a, 0xd5, 0x15, 0x22, + 0xeb, 0x8d, 0xf6, 0xdc, 0xa2, 0xdf, 0x83, 0x2f, 0xea, 0x8a, 0x52, 0x6f, 0x61, 0xe3, 0x4c, 0xaf, + 0xb8, 0xc8, 0xc9, 0x4a, 0x47, 0x14, 0xd0, 0x1d, 0x28, 0x10, 0x4d, 0x62, 0xc2, 0x66, 0xa8, 0x9a, + 0x66, 0x54, 0x1e, 0xef, 0xec, 0x1a, 0x5a, 0x43, 0xaa, 0x3c, 0x79, 0x2a, 0xa6, 0xce, 0x79, 0xb7, + 0x1f, 0x7f, 0x53, 0x89, 0xbd, 0xe9, 0x73, 0xde, 0x9d, 0xc7, 0xcf, 0x9e, 0xc6, 0xde, 0x05, 0x74, + 0x17, 0xd6, 0xcf, 0xbc, 0xcd, 0xaa, 0xb6, 0x35, 0xc3, 0x9d, 0x49, 0xf0, 0x4f, 0xb3, 0x2f, 0x26, + 0xf8, 0xa7, 0xf9, 0xb3, 0x68, 0x03, 0xee, 0x84, 0x7e, 0xae, 0xbe, 0x86, 0x22, 0x61, 0x75, 0x26, + 0xc3, 0x52, 0x62, 0xc4, 0x74, 0x8e, 0x5c, 0x62, 0xc4, 0x74, 0x16, 0x40, 0xb7, 0x60, 0x0d, 0x57, + 0x79, 0x11, 0x95, 0x27, 0xa7, 0x8e, 0xe5, 0x19, 0xc7, 0xf6, 0xee, 0x4e, 0xe8, 0xd8, 0xde, 0xdd, + 0x11, 0xaf, 0x16, 0x7f, 0x13, 0xe0, 0x46, 0xe2, 0x63, 0x8e, 0x36, 0xe1, 0x41, 0xc2, 0xe8, 0x42, + 0xa1, 0x9a, 0x7f, 0x15, 0xdd, 0x04, 0xa4, 0xe2, 0x4e, 0x4d, 0xee, 0xd4, 0x8d, 0x3a, 0xee, 0x60, + 0xc2, 0xa4, 0x4c, 0xcc, 0xa0, 0x3c, 0x64, 0x71, 0x47, 0xda, 0x6b, 0xe1, 0x9a, 0x28, 0xa0, 0x65, + 0x58, 0xaa, 0xc9, 0x5a, 0x74, 0x4b, 0xa1, 0xab, 0x90, 0xab, 0x61, 0x4d, 0x27, 0xca, 0x61, 0xa8, + 0x63, 0xe8, 0x06, 0xac, 0xf2, 0xab, 0xa1, 0x55, 0x1b, 0xb8, 0x76, 0x10, 0x46, 0x2d, 0x14, 0x65, + 0xb8, 0x3e, 0x5f, 0x5b, 0x77, 0x40, 0x7f, 0x46, 0x5f, 0xc1, 0xfd, 0x84, 0xd2, 0xba, 0x32, 0x7e, + 0x35, 0x57, 0xd9, 0x12, 0x2c, 0xec, 0x1f, 0xb4, 0x5a, 0xa2, 0x50, 0xfc, 0x05, 0x72, 0xea, 0xe8, + 0x68, 0x38, 0xe8, 0x85, 0x5f, 0x6e, 0x22, 0xa4, 0x5d, 0x6a, 0xc5, 0xef, 0x67, 0x97, 0x5a, 0xb3, + 0xda, 0x93, 0xba, 0x6c, 0xed, 0x79, 0xf8, 0x03, 0xac, 0xcc, 0xc9, 0x5f, 0x38, 0x4c, 0x95, 0x28, + 0x3a, 0xae, 0x32, 0xdd, 0x6f, 0xe1, 0x2e, 0x6e, 0xcd, 0x15, 0xbf, 0x0c, 0x4b, 0x9a, 0xb2, 0xaf, + 0xbf, 0x92, 0x08, 0x16, 0x05, 0x94, 0x85, 0x74, 0x43, 0x6b, 0x8b, 0xa9, 0xbd, 0x5f, 0x05, 0xb8, + 0xd5, 0x73, 0xac, 0xa4, 0xea, 0xf6, 0x56, 0x9b, 0x96, 0x4f, 0xe2, 0x8f, 0xf0, 0x30, 0xa3, 0xa3, + 0x0a, 0xaf, 0x9f, 0xf2, 0xc8, 0xbe, 0x33, 0x34, 0xed, 0x7e, 0xc9, 0xf1, 0xfa, 0xe5, 0x3e, 0xb5, + 0xd9, 0xc3, 0x5d, 0x8e, 0x5c, 0xa6, 0x3b, 0xf0, 0x67, 0xbe, 0xe4, 0x9f, 0x9f, 0x58, 0xfe, 0x3f, + 0x82, 0xf0, 0x47, 0x6a, 0xad, 0x1e, 0x61, 0xab, 0x2c, 0x4b, 0xd3, 0xf2, 0x4b, 0xdd, 0xad, 0x0f, + 0xb1, 0xf5, 0x0d, 0xb3, 0xbe, 0x69, 0x5a, 0xfe, 0x9b, 0xee, 0xd6, 0xd1, 0x22, 0x63, 0xdc, 0xfe, + 0x37, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x00, 0x2e, 0x55, 0x1a, 0x0c, 0x00, 0x00, +} diff --git 
a/vendor/google.golang.org/genproto/googleapis/cloud/kms/v1/service.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/kms/v1/service.pb.go new file mode 100644 index 000000000..6a0a6cabc --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/kms/v1/service.pb.go @@ -0,0 +1,2529 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/kms/v1/service.proto + +package kms // import "google.golang.org/genproto/googleapis/cloud/kms/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/golang/protobuf/ptypes/struct" +import _ "github.com/golang/protobuf/ptypes/wrappers" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import field_mask "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Request message for [KeyManagementService.ListKeyRings][google.cloud.kms.v1.KeyManagementService.ListKeyRings]. +type ListKeyRingsRequest struct { + // Required. The resource name of the location associated with the + // [KeyRings][google.cloud.kms.v1.KeyRing], in the format `projects/*/locations/*`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional limit on the number of [KeyRings][google.cloud.kms.v1.KeyRing] to include in the + // response. Further [KeyRings][google.cloud.kms.v1.KeyRing] can subsequently be obtained by + // including the [ListKeyRingsResponse.next_page_token][google.cloud.kms.v1.ListKeyRingsResponse.next_page_token] in a subsequent + // request. If unspecified, the server will pick an appropriate default. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional pagination token, returned earlier via + // [ListKeyRingsResponse.next_page_token][google.cloud.kms.v1.ListKeyRingsResponse.next_page_token]. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListKeyRingsRequest) Reset() { *m = ListKeyRingsRequest{} } +func (m *ListKeyRingsRequest) String() string { return proto.CompactTextString(m) } +func (*ListKeyRingsRequest) ProtoMessage() {} +func (*ListKeyRingsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{0} +} +func (m *ListKeyRingsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListKeyRingsRequest.Unmarshal(m, b) +} +func (m *ListKeyRingsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListKeyRingsRequest.Marshal(b, m, deterministic) +} +func (dst *ListKeyRingsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListKeyRingsRequest.Merge(dst, src) +} +func (m *ListKeyRingsRequest) XXX_Size() int { + return xxx_messageInfo_ListKeyRingsRequest.Size(m) +} +func (m *ListKeyRingsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListKeyRingsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListKeyRingsRequest proto.InternalMessageInfo + +func (m *ListKeyRingsRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListKeyRingsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListKeyRingsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Request message for [KeyManagementService.ListCryptoKeys][google.cloud.kms.v1.KeyManagementService.ListCryptoKeys]. +type ListCryptoKeysRequest struct { + // Required. The resource name of the [KeyRing][google.cloud.kms.v1.KeyRing] to list, in the format + // `projects/*/locations/*/keyRings/*`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional limit on the number of [CryptoKeys][google.cloud.kms.v1.CryptoKey] to include in the + // response. Further [CryptoKeys][google.cloud.kms.v1.CryptoKey] can subsequently be obtained by + // including the [ListCryptoKeysResponse.next_page_token][google.cloud.kms.v1.ListCryptoKeysResponse.next_page_token] in a subsequent + // request. If unspecified, the server will pick an appropriate default. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional pagination token, returned earlier via + // [ListCryptoKeysResponse.next_page_token][google.cloud.kms.v1.ListCryptoKeysResponse.next_page_token]. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // The fields of the primary version to include in the response. 
+ VersionView CryptoKeyVersion_CryptoKeyVersionView `protobuf:"varint,4,opt,name=version_view,json=versionView,proto3,enum=google.cloud.kms.v1.CryptoKeyVersion_CryptoKeyVersionView" json:"version_view,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListCryptoKeysRequest) Reset() { *m = ListCryptoKeysRequest{} } +func (m *ListCryptoKeysRequest) String() string { return proto.CompactTextString(m) } +func (*ListCryptoKeysRequest) ProtoMessage() {} +func (*ListCryptoKeysRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{1} +} +func (m *ListCryptoKeysRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListCryptoKeysRequest.Unmarshal(m, b) +} +func (m *ListCryptoKeysRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListCryptoKeysRequest.Marshal(b, m, deterministic) +} +func (dst *ListCryptoKeysRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListCryptoKeysRequest.Merge(dst, src) +} +func (m *ListCryptoKeysRequest) XXX_Size() int { + return xxx_messageInfo_ListCryptoKeysRequest.Size(m) +} +func (m *ListCryptoKeysRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListCryptoKeysRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListCryptoKeysRequest proto.InternalMessageInfo + +func (m *ListCryptoKeysRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListCryptoKeysRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListCryptoKeysRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListCryptoKeysRequest) GetVersionView() CryptoKeyVersion_CryptoKeyVersionView { + if m != nil { + return m.VersionView + } + return CryptoKeyVersion_CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED +} + +// Request message for [KeyManagementService.ListCryptoKeyVersions][google.cloud.kms.v1.KeyManagementService.ListCryptoKeyVersions]. +type ListCryptoKeyVersionsRequest struct { + // Required. The resource name of the [CryptoKey][google.cloud.kms.v1.CryptoKey] to list, in the format + // `projects/*/locations/*/keyRings/*/cryptoKeys/*`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional limit on the number of [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] to + // include in the response. Further [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] can + // subsequently be obtained by including the + // [ListCryptoKeyVersionsResponse.next_page_token][google.cloud.kms.v1.ListCryptoKeyVersionsResponse.next_page_token] in a subsequent request. + // If unspecified, the server will pick an appropriate default. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional pagination token, returned earlier via + // [ListCryptoKeyVersionsResponse.next_page_token][google.cloud.kms.v1.ListCryptoKeyVersionsResponse.next_page_token]. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // The fields to include in the response. 
+ View CryptoKeyVersion_CryptoKeyVersionView `protobuf:"varint,4,opt,name=view,proto3,enum=google.cloud.kms.v1.CryptoKeyVersion_CryptoKeyVersionView" json:"view,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListCryptoKeyVersionsRequest) Reset() { *m = ListCryptoKeyVersionsRequest{} } +func (m *ListCryptoKeyVersionsRequest) String() string { return proto.CompactTextString(m) } +func (*ListCryptoKeyVersionsRequest) ProtoMessage() {} +func (*ListCryptoKeyVersionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{2} +} +func (m *ListCryptoKeyVersionsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListCryptoKeyVersionsRequest.Unmarshal(m, b) +} +func (m *ListCryptoKeyVersionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListCryptoKeyVersionsRequest.Marshal(b, m, deterministic) +} +func (dst *ListCryptoKeyVersionsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListCryptoKeyVersionsRequest.Merge(dst, src) +} +func (m *ListCryptoKeyVersionsRequest) XXX_Size() int { + return xxx_messageInfo_ListCryptoKeyVersionsRequest.Size(m) +} +func (m *ListCryptoKeyVersionsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListCryptoKeyVersionsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListCryptoKeyVersionsRequest proto.InternalMessageInfo + +func (m *ListCryptoKeyVersionsRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListCryptoKeyVersionsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListCryptoKeyVersionsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListCryptoKeyVersionsRequest) GetView() CryptoKeyVersion_CryptoKeyVersionView { + if m != nil { + return m.View + } + return CryptoKeyVersion_CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED +} + +// Response message for [KeyManagementService.ListKeyRings][google.cloud.kms.v1.KeyManagementService.ListKeyRings]. +type ListKeyRingsResponse struct { + // The list of [KeyRings][google.cloud.kms.v1.KeyRing]. + KeyRings []*KeyRing `protobuf:"bytes,1,rep,name=key_rings,json=keyRings,proto3" json:"key_rings,omitempty"` + // A token to retrieve next page of results. Pass this value in + // [ListKeyRingsRequest.page_token][google.cloud.kms.v1.ListKeyRingsRequest.page_token] to retrieve the next page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of [KeyRings][google.cloud.kms.v1.KeyRing] that matched the query. 
+ TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListKeyRingsResponse) Reset() { *m = ListKeyRingsResponse{} } +func (m *ListKeyRingsResponse) String() string { return proto.CompactTextString(m) } +func (*ListKeyRingsResponse) ProtoMessage() {} +func (*ListKeyRingsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{3} +} +func (m *ListKeyRingsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListKeyRingsResponse.Unmarshal(m, b) +} +func (m *ListKeyRingsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListKeyRingsResponse.Marshal(b, m, deterministic) +} +func (dst *ListKeyRingsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListKeyRingsResponse.Merge(dst, src) +} +func (m *ListKeyRingsResponse) XXX_Size() int { + return xxx_messageInfo_ListKeyRingsResponse.Size(m) +} +func (m *ListKeyRingsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListKeyRingsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListKeyRingsResponse proto.InternalMessageInfo + +func (m *ListKeyRingsResponse) GetKeyRings() []*KeyRing { + if m != nil { + return m.KeyRings + } + return nil +} + +func (m *ListKeyRingsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func (m *ListKeyRingsResponse) GetTotalSize() int32 { + if m != nil { + return m.TotalSize + } + return 0 +} + +// Response message for [KeyManagementService.ListCryptoKeys][google.cloud.kms.v1.KeyManagementService.ListCryptoKeys]. +type ListCryptoKeysResponse struct { + // The list of [CryptoKeys][google.cloud.kms.v1.CryptoKey]. + CryptoKeys []*CryptoKey `protobuf:"bytes,1,rep,name=crypto_keys,json=cryptoKeys,proto3" json:"crypto_keys,omitempty"` + // A token to retrieve next page of results. Pass this value in + // [ListCryptoKeysRequest.page_token][google.cloud.kms.v1.ListCryptoKeysRequest.page_token] to retrieve the next page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of [CryptoKeys][google.cloud.kms.v1.CryptoKey] that matched the query. 
+ TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListCryptoKeysResponse) Reset() { *m = ListCryptoKeysResponse{} } +func (m *ListCryptoKeysResponse) String() string { return proto.CompactTextString(m) } +func (*ListCryptoKeysResponse) ProtoMessage() {} +func (*ListCryptoKeysResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{4} +} +func (m *ListCryptoKeysResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListCryptoKeysResponse.Unmarshal(m, b) +} +func (m *ListCryptoKeysResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListCryptoKeysResponse.Marshal(b, m, deterministic) +} +func (dst *ListCryptoKeysResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListCryptoKeysResponse.Merge(dst, src) +} +func (m *ListCryptoKeysResponse) XXX_Size() int { + return xxx_messageInfo_ListCryptoKeysResponse.Size(m) +} +func (m *ListCryptoKeysResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListCryptoKeysResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListCryptoKeysResponse proto.InternalMessageInfo + +func (m *ListCryptoKeysResponse) GetCryptoKeys() []*CryptoKey { + if m != nil { + return m.CryptoKeys + } + return nil +} + +func (m *ListCryptoKeysResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func (m *ListCryptoKeysResponse) GetTotalSize() int32 { + if m != nil { + return m.TotalSize + } + return 0 +} + +// Response message for [KeyManagementService.ListCryptoKeyVersions][google.cloud.kms.v1.KeyManagementService.ListCryptoKeyVersions]. +type ListCryptoKeyVersionsResponse struct { + // The list of [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion]. + CryptoKeyVersions []*CryptoKeyVersion `protobuf:"bytes,1,rep,name=crypto_key_versions,json=cryptoKeyVersions,proto3" json:"crypto_key_versions,omitempty"` + // A token to retrieve next page of results. Pass this value in + // [ListCryptoKeyVersionsRequest.page_token][google.cloud.kms.v1.ListCryptoKeyVersionsRequest.page_token] to retrieve the next page of + // results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] that matched the + // query. 
+ TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListCryptoKeyVersionsResponse) Reset() { *m = ListCryptoKeyVersionsResponse{} } +func (m *ListCryptoKeyVersionsResponse) String() string { return proto.CompactTextString(m) } +func (*ListCryptoKeyVersionsResponse) ProtoMessage() {} +func (*ListCryptoKeyVersionsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{5} +} +func (m *ListCryptoKeyVersionsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListCryptoKeyVersionsResponse.Unmarshal(m, b) +} +func (m *ListCryptoKeyVersionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListCryptoKeyVersionsResponse.Marshal(b, m, deterministic) +} +func (dst *ListCryptoKeyVersionsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListCryptoKeyVersionsResponse.Merge(dst, src) +} +func (m *ListCryptoKeyVersionsResponse) XXX_Size() int { + return xxx_messageInfo_ListCryptoKeyVersionsResponse.Size(m) +} +func (m *ListCryptoKeyVersionsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListCryptoKeyVersionsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListCryptoKeyVersionsResponse proto.InternalMessageInfo + +func (m *ListCryptoKeyVersionsResponse) GetCryptoKeyVersions() []*CryptoKeyVersion { + if m != nil { + return m.CryptoKeyVersions + } + return nil +} + +func (m *ListCryptoKeyVersionsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func (m *ListCryptoKeyVersionsResponse) GetTotalSize() int32 { + if m != nil { + return m.TotalSize + } + return 0 +} + +// Request message for [KeyManagementService.GetKeyRing][google.cloud.kms.v1.KeyManagementService.GetKeyRing]. +type GetKeyRingRequest struct { + // The [name][google.cloud.kms.v1.KeyRing.name] of the [KeyRing][google.cloud.kms.v1.KeyRing] to get. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetKeyRingRequest) Reset() { *m = GetKeyRingRequest{} } +func (m *GetKeyRingRequest) String() string { return proto.CompactTextString(m) } +func (*GetKeyRingRequest) ProtoMessage() {} +func (*GetKeyRingRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{6} +} +func (m *GetKeyRingRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetKeyRingRequest.Unmarshal(m, b) +} +func (m *GetKeyRingRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetKeyRingRequest.Marshal(b, m, deterministic) +} +func (dst *GetKeyRingRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetKeyRingRequest.Merge(dst, src) +} +func (m *GetKeyRingRequest) XXX_Size() int { + return xxx_messageInfo_GetKeyRingRequest.Size(m) +} +func (m *GetKeyRingRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetKeyRingRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetKeyRingRequest proto.InternalMessageInfo + +func (m *GetKeyRingRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for [KeyManagementService.GetCryptoKey][google.cloud.kms.v1.KeyManagementService.GetCryptoKey]. 
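Not part of the generated file or of this diff hunk: a minimal pagination sketch over the List request/response messages defined above, assuming the generated package is imported as kmspb (conventionally google.golang.org/genproto/googleapis/cloud/kms/v1, vendored in this repo) and that conn is an already-authenticated *grpc.ClientConn; listAllKeyRings is an illustrative helper name, not part of the library.

package kmsexample

import (
	"context"

	kmspb "google.golang.org/genproto/googleapis/cloud/kms/v1"
	"google.golang.org/grpc"
)

// listAllKeyRings walks ListKeyRings page by page until next_page_token comes back empty.
func listAllKeyRings(ctx context.Context, conn *grpc.ClientConn, parent string) ([]*kmspb.KeyRing, error) {
	client := kmspb.NewKeyManagementServiceClient(conn)
	var rings []*kmspb.KeyRing
	// parent has the form projects/*/locations/*.
	req := &kmspb.ListKeyRingsRequest{Parent: parent, PageSize: 100}
	for {
		resp, err := client.ListKeyRings(ctx, req)
		if err != nil {
			return nil, err
		}
		rings = append(rings, resp.GetKeyRings()...)
		if resp.GetNextPageToken() == "" {
			return rings, nil
		}
		req.PageToken = resp.GetNextPageToken()
	}
}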
+type GetCryptoKeyRequest struct { + // The [name][google.cloud.kms.v1.CryptoKey.name] of the [CryptoKey][google.cloud.kms.v1.CryptoKey] to get. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetCryptoKeyRequest) Reset() { *m = GetCryptoKeyRequest{} } +func (m *GetCryptoKeyRequest) String() string { return proto.CompactTextString(m) } +func (*GetCryptoKeyRequest) ProtoMessage() {} +func (*GetCryptoKeyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{7} +} +func (m *GetCryptoKeyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetCryptoKeyRequest.Unmarshal(m, b) +} +func (m *GetCryptoKeyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetCryptoKeyRequest.Marshal(b, m, deterministic) +} +func (dst *GetCryptoKeyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetCryptoKeyRequest.Merge(dst, src) +} +func (m *GetCryptoKeyRequest) XXX_Size() int { + return xxx_messageInfo_GetCryptoKeyRequest.Size(m) +} +func (m *GetCryptoKeyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetCryptoKeyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetCryptoKeyRequest proto.InternalMessageInfo + +func (m *GetCryptoKeyRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for [KeyManagementService.GetCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.GetCryptoKeyVersion]. +type GetCryptoKeyVersionRequest struct { + // The [name][google.cloud.kms.v1.CryptoKeyVersion.name] of the [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to get. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetCryptoKeyVersionRequest) Reset() { *m = GetCryptoKeyVersionRequest{} } +func (m *GetCryptoKeyVersionRequest) String() string { return proto.CompactTextString(m) } +func (*GetCryptoKeyVersionRequest) ProtoMessage() {} +func (*GetCryptoKeyVersionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{8} +} +func (m *GetCryptoKeyVersionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetCryptoKeyVersionRequest.Unmarshal(m, b) +} +func (m *GetCryptoKeyVersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetCryptoKeyVersionRequest.Marshal(b, m, deterministic) +} +func (dst *GetCryptoKeyVersionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetCryptoKeyVersionRequest.Merge(dst, src) +} +func (m *GetCryptoKeyVersionRequest) XXX_Size() int { + return xxx_messageInfo_GetCryptoKeyVersionRequest.Size(m) +} +func (m *GetCryptoKeyVersionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetCryptoKeyVersionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetCryptoKeyVersionRequest proto.InternalMessageInfo + +func (m *GetCryptoKeyVersionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for [KeyManagementService.GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]. +type GetPublicKeyRequest struct { + // The [name][google.cloud.kms.v1.CryptoKeyVersion.name] of the [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] public key to + // get. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPublicKeyRequest) Reset() { *m = GetPublicKeyRequest{} } +func (m *GetPublicKeyRequest) String() string { return proto.CompactTextString(m) } +func (*GetPublicKeyRequest) ProtoMessage() {} +func (*GetPublicKeyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{9} +} +func (m *GetPublicKeyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPublicKeyRequest.Unmarshal(m, b) +} +func (m *GetPublicKeyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPublicKeyRequest.Marshal(b, m, deterministic) +} +func (dst *GetPublicKeyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPublicKeyRequest.Merge(dst, src) +} +func (m *GetPublicKeyRequest) XXX_Size() int { + return xxx_messageInfo_GetPublicKeyRequest.Size(m) +} +func (m *GetPublicKeyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetPublicKeyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPublicKeyRequest proto.InternalMessageInfo + +func (m *GetPublicKeyRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for [KeyManagementService.CreateKeyRing][google.cloud.kms.v1.KeyManagementService.CreateKeyRing]. +type CreateKeyRingRequest struct { + // Required. The resource name of the location associated with the + // [KeyRings][google.cloud.kms.v1.KeyRing], in the format `projects/*/locations/*`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Required. It must be unique within a location and match the regular + // expression `[a-zA-Z0-9_-]{1,63}` + KeyRingId string `protobuf:"bytes,2,opt,name=key_ring_id,json=keyRingId,proto3" json:"key_ring_id,omitempty"` + // A [KeyRing][google.cloud.kms.v1.KeyRing] with initial field values. 
+ KeyRing *KeyRing `protobuf:"bytes,3,opt,name=key_ring,json=keyRing,proto3" json:"key_ring,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateKeyRingRequest) Reset() { *m = CreateKeyRingRequest{} } +func (m *CreateKeyRingRequest) String() string { return proto.CompactTextString(m) } +func (*CreateKeyRingRequest) ProtoMessage() {} +func (*CreateKeyRingRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{10} +} +func (m *CreateKeyRingRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateKeyRingRequest.Unmarshal(m, b) +} +func (m *CreateKeyRingRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateKeyRingRequest.Marshal(b, m, deterministic) +} +func (dst *CreateKeyRingRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateKeyRingRequest.Merge(dst, src) +} +func (m *CreateKeyRingRequest) XXX_Size() int { + return xxx_messageInfo_CreateKeyRingRequest.Size(m) +} +func (m *CreateKeyRingRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateKeyRingRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateKeyRingRequest proto.InternalMessageInfo + +func (m *CreateKeyRingRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateKeyRingRequest) GetKeyRingId() string { + if m != nil { + return m.KeyRingId + } + return "" +} + +func (m *CreateKeyRingRequest) GetKeyRing() *KeyRing { + if m != nil { + return m.KeyRing + } + return nil +} + +// Request message for [KeyManagementService.CreateCryptoKey][google.cloud.kms.v1.KeyManagementService.CreateCryptoKey]. +type CreateCryptoKeyRequest struct { + // Required. The [name][google.cloud.kms.v1.KeyRing.name] of the KeyRing associated with the + // [CryptoKeys][google.cloud.kms.v1.CryptoKey]. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Required. It must be unique within a KeyRing and match the regular + // expression `[a-zA-Z0-9_-]{1,63}` + CryptoKeyId string `protobuf:"bytes,2,opt,name=crypto_key_id,json=cryptoKeyId,proto3" json:"crypto_key_id,omitempty"` + // A [CryptoKey][google.cloud.kms.v1.CryptoKey] with initial field values. 
+ CryptoKey *CryptoKey `protobuf:"bytes,3,opt,name=crypto_key,json=cryptoKey,proto3" json:"crypto_key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateCryptoKeyRequest) Reset() { *m = CreateCryptoKeyRequest{} } +func (m *CreateCryptoKeyRequest) String() string { return proto.CompactTextString(m) } +func (*CreateCryptoKeyRequest) ProtoMessage() {} +func (*CreateCryptoKeyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{11} +} +func (m *CreateCryptoKeyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateCryptoKeyRequest.Unmarshal(m, b) +} +func (m *CreateCryptoKeyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateCryptoKeyRequest.Marshal(b, m, deterministic) +} +func (dst *CreateCryptoKeyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateCryptoKeyRequest.Merge(dst, src) +} +func (m *CreateCryptoKeyRequest) XXX_Size() int { + return xxx_messageInfo_CreateCryptoKeyRequest.Size(m) +} +func (m *CreateCryptoKeyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateCryptoKeyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateCryptoKeyRequest proto.InternalMessageInfo + +func (m *CreateCryptoKeyRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateCryptoKeyRequest) GetCryptoKeyId() string { + if m != nil { + return m.CryptoKeyId + } + return "" +} + +func (m *CreateCryptoKeyRequest) GetCryptoKey() *CryptoKey { + if m != nil { + return m.CryptoKey + } + return nil +} + +// Request message for [KeyManagementService.CreateCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion]. +type CreateCryptoKeyVersionRequest struct { + // Required. The [name][google.cloud.kms.v1.CryptoKey.name] of the [CryptoKey][google.cloud.kms.v1.CryptoKey] associated with + // the [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion]. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // A [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with initial field values. 
+ CryptoKeyVersion *CryptoKeyVersion `protobuf:"bytes,2,opt,name=crypto_key_version,json=cryptoKeyVersion,proto3" json:"crypto_key_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateCryptoKeyVersionRequest) Reset() { *m = CreateCryptoKeyVersionRequest{} } +func (m *CreateCryptoKeyVersionRequest) String() string { return proto.CompactTextString(m) } +func (*CreateCryptoKeyVersionRequest) ProtoMessage() {} +func (*CreateCryptoKeyVersionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{12} +} +func (m *CreateCryptoKeyVersionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateCryptoKeyVersionRequest.Unmarshal(m, b) +} +func (m *CreateCryptoKeyVersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateCryptoKeyVersionRequest.Marshal(b, m, deterministic) +} +func (dst *CreateCryptoKeyVersionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateCryptoKeyVersionRequest.Merge(dst, src) +} +func (m *CreateCryptoKeyVersionRequest) XXX_Size() int { + return xxx_messageInfo_CreateCryptoKeyVersionRequest.Size(m) +} +func (m *CreateCryptoKeyVersionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateCryptoKeyVersionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateCryptoKeyVersionRequest proto.InternalMessageInfo + +func (m *CreateCryptoKeyVersionRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateCryptoKeyVersionRequest) GetCryptoKeyVersion() *CryptoKeyVersion { + if m != nil { + return m.CryptoKeyVersion + } + return nil +} + +// Request message for [KeyManagementService.UpdateCryptoKey][google.cloud.kms.v1.KeyManagementService.UpdateCryptoKey]. +type UpdateCryptoKeyRequest struct { + // [CryptoKey][google.cloud.kms.v1.CryptoKey] with updated values. + CryptoKey *CryptoKey `protobuf:"bytes,1,opt,name=crypto_key,json=cryptoKey,proto3" json:"crypto_key,omitempty"` + // Required list of fields to be updated in this request. 
+ UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateCryptoKeyRequest) Reset() { *m = UpdateCryptoKeyRequest{} } +func (m *UpdateCryptoKeyRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateCryptoKeyRequest) ProtoMessage() {} +func (*UpdateCryptoKeyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{13} +} +func (m *UpdateCryptoKeyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateCryptoKeyRequest.Unmarshal(m, b) +} +func (m *UpdateCryptoKeyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateCryptoKeyRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateCryptoKeyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateCryptoKeyRequest.Merge(dst, src) +} +func (m *UpdateCryptoKeyRequest) XXX_Size() int { + return xxx_messageInfo_UpdateCryptoKeyRequest.Size(m) +} +func (m *UpdateCryptoKeyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateCryptoKeyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateCryptoKeyRequest proto.InternalMessageInfo + +func (m *UpdateCryptoKeyRequest) GetCryptoKey() *CryptoKey { + if m != nil { + return m.CryptoKey + } + return nil +} + +func (m *UpdateCryptoKeyRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +// Request message for [KeyManagementService.UpdateCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyVersion]. +type UpdateCryptoKeyVersionRequest struct { + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with updated values. + CryptoKeyVersion *CryptoKeyVersion `protobuf:"bytes,1,opt,name=crypto_key_version,json=cryptoKeyVersion,proto3" json:"crypto_key_version,omitempty"` + // Required list of fields to be updated in this request. 
+ UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateCryptoKeyVersionRequest) Reset() { *m = UpdateCryptoKeyVersionRequest{} } +func (m *UpdateCryptoKeyVersionRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateCryptoKeyVersionRequest) ProtoMessage() {} +func (*UpdateCryptoKeyVersionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{14} +} +func (m *UpdateCryptoKeyVersionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateCryptoKeyVersionRequest.Unmarshal(m, b) +} +func (m *UpdateCryptoKeyVersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateCryptoKeyVersionRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateCryptoKeyVersionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateCryptoKeyVersionRequest.Merge(dst, src) +} +func (m *UpdateCryptoKeyVersionRequest) XXX_Size() int { + return xxx_messageInfo_UpdateCryptoKeyVersionRequest.Size(m) +} +func (m *UpdateCryptoKeyVersionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateCryptoKeyVersionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateCryptoKeyVersionRequest proto.InternalMessageInfo + +func (m *UpdateCryptoKeyVersionRequest) GetCryptoKeyVersion() *CryptoKeyVersion { + if m != nil { + return m.CryptoKeyVersion + } + return nil +} + +func (m *UpdateCryptoKeyVersionRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +// Request message for [KeyManagementService.Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. +type EncryptRequest struct { + // Required. The resource name of the [CryptoKey][google.cloud.kms.v1.CryptoKey] or [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] + // to use for encryption. + // + // If a [CryptoKey][google.cloud.kms.v1.CryptoKey] is specified, the server will use its + // [primary version][google.cloud.kms.v1.CryptoKey.primary]. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The data to encrypt. Must be no larger than 64KiB. + // + // The maximum size depends on the key version's + // [protection_level][google.cloud.kms.v1.CryptoKeyVersionTemplate.protection_level]. For + // [SOFTWARE][google.cloud.kms.v1.ProtectionLevel.SOFTWARE] keys, the plaintext must be no larger + // than 64KiB. For [HSM][google.cloud.kms.v1.ProtectionLevel.HSM] keys, the combined length of the + // plaintext and additional_authenticated_data fields must be no larger than + // 8KiB. + Plaintext []byte `protobuf:"bytes,2,opt,name=plaintext,proto3" json:"plaintext,omitempty"` + // Optional data that, if specified, must also be provided during decryption + // through [DecryptRequest.additional_authenticated_data][google.cloud.kms.v1.DecryptRequest.additional_authenticated_data]. + // + // The maximum size depends on the key version's + // [protection_level][google.cloud.kms.v1.CryptoKeyVersionTemplate.protection_level]. For + // [SOFTWARE][google.cloud.kms.v1.ProtectionLevel.SOFTWARE] keys, the AAD must be no larger than + // 64KiB. For [HSM][google.cloud.kms.v1.ProtectionLevel.HSM] keys, the combined length of the + // plaintext and additional_authenticated_data fields must be no larger than + // 8KiB. 
+ AdditionalAuthenticatedData []byte `protobuf:"bytes,3,opt,name=additional_authenticated_data,json=additionalAuthenticatedData,proto3" json:"additional_authenticated_data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EncryptRequest) Reset() { *m = EncryptRequest{} } +func (m *EncryptRequest) String() string { return proto.CompactTextString(m) } +func (*EncryptRequest) ProtoMessage() {} +func (*EncryptRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{15} +} +func (m *EncryptRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EncryptRequest.Unmarshal(m, b) +} +func (m *EncryptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EncryptRequest.Marshal(b, m, deterministic) +} +func (dst *EncryptRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_EncryptRequest.Merge(dst, src) +} +func (m *EncryptRequest) XXX_Size() int { + return xxx_messageInfo_EncryptRequest.Size(m) +} +func (m *EncryptRequest) XXX_DiscardUnknown() { + xxx_messageInfo_EncryptRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_EncryptRequest proto.InternalMessageInfo + +func (m *EncryptRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *EncryptRequest) GetPlaintext() []byte { + if m != nil { + return m.Plaintext + } + return nil +} + +func (m *EncryptRequest) GetAdditionalAuthenticatedData() []byte { + if m != nil { + return m.AdditionalAuthenticatedData + } + return nil +} + +// Request message for [KeyManagementService.Decrypt][google.cloud.kms.v1.KeyManagementService.Decrypt]. +type DecryptRequest struct { + // Required. The resource name of the [CryptoKey][google.cloud.kms.v1.CryptoKey] to use for decryption. + // The server will choose the appropriate version. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The encrypted data originally returned in + // [EncryptResponse.ciphertext][google.cloud.kms.v1.EncryptResponse.ciphertext]. + Ciphertext []byte `protobuf:"bytes,2,opt,name=ciphertext,proto3" json:"ciphertext,omitempty"` + // Optional data that must match the data originally supplied in + // [EncryptRequest.additional_authenticated_data][google.cloud.kms.v1.EncryptRequest.additional_authenticated_data]. 
+ AdditionalAuthenticatedData []byte `protobuf:"bytes,3,opt,name=additional_authenticated_data,json=additionalAuthenticatedData,proto3" json:"additional_authenticated_data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DecryptRequest) Reset() { *m = DecryptRequest{} } +func (m *DecryptRequest) String() string { return proto.CompactTextString(m) } +func (*DecryptRequest) ProtoMessage() {} +func (*DecryptRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{16} +} +func (m *DecryptRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DecryptRequest.Unmarshal(m, b) +} +func (m *DecryptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DecryptRequest.Marshal(b, m, deterministic) +} +func (dst *DecryptRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DecryptRequest.Merge(dst, src) +} +func (m *DecryptRequest) XXX_Size() int { + return xxx_messageInfo_DecryptRequest.Size(m) +} +func (m *DecryptRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DecryptRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DecryptRequest proto.InternalMessageInfo + +func (m *DecryptRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *DecryptRequest) GetCiphertext() []byte { + if m != nil { + return m.Ciphertext + } + return nil +} + +func (m *DecryptRequest) GetAdditionalAuthenticatedData() []byte { + if m != nil { + return m.AdditionalAuthenticatedData + } + return nil +} + +// Request message for [KeyManagementService.AsymmetricSign][google.cloud.kms.v1.KeyManagementService.AsymmetricSign]. +type AsymmetricSignRequest struct { + // Required. The resource name of the [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to use for signing. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The digest of the data to sign. The digest must be produced with + // the same digest algorithm as specified by the key version's + // [algorithm][google.cloud.kms.v1.CryptoKeyVersion.algorithm]. 
+ Digest *Digest `protobuf:"bytes,3,opt,name=digest,proto3" json:"digest,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AsymmetricSignRequest) Reset() { *m = AsymmetricSignRequest{} } +func (m *AsymmetricSignRequest) String() string { return proto.CompactTextString(m) } +func (*AsymmetricSignRequest) ProtoMessage() {} +func (*AsymmetricSignRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{17} +} +func (m *AsymmetricSignRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AsymmetricSignRequest.Unmarshal(m, b) +} +func (m *AsymmetricSignRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AsymmetricSignRequest.Marshal(b, m, deterministic) +} +func (dst *AsymmetricSignRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AsymmetricSignRequest.Merge(dst, src) +} +func (m *AsymmetricSignRequest) XXX_Size() int { + return xxx_messageInfo_AsymmetricSignRequest.Size(m) +} +func (m *AsymmetricSignRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AsymmetricSignRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AsymmetricSignRequest proto.InternalMessageInfo + +func (m *AsymmetricSignRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AsymmetricSignRequest) GetDigest() *Digest { + if m != nil { + return m.Digest + } + return nil +} + +// Request message for [KeyManagementService.AsymmetricDecrypt][google.cloud.kms.v1.KeyManagementService.AsymmetricDecrypt]. +type AsymmetricDecryptRequest struct { + // Required. The resource name of the [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to use for + // decryption. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The data encrypted with the named [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]'s public + // key using OAEP. 
+ Ciphertext []byte `protobuf:"bytes,3,opt,name=ciphertext,proto3" json:"ciphertext,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AsymmetricDecryptRequest) Reset() { *m = AsymmetricDecryptRequest{} } +func (m *AsymmetricDecryptRequest) String() string { return proto.CompactTextString(m) } +func (*AsymmetricDecryptRequest) ProtoMessage() {} +func (*AsymmetricDecryptRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{18} +} +func (m *AsymmetricDecryptRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AsymmetricDecryptRequest.Unmarshal(m, b) +} +func (m *AsymmetricDecryptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AsymmetricDecryptRequest.Marshal(b, m, deterministic) +} +func (dst *AsymmetricDecryptRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AsymmetricDecryptRequest.Merge(dst, src) +} +func (m *AsymmetricDecryptRequest) XXX_Size() int { + return xxx_messageInfo_AsymmetricDecryptRequest.Size(m) +} +func (m *AsymmetricDecryptRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AsymmetricDecryptRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AsymmetricDecryptRequest proto.InternalMessageInfo + +func (m *AsymmetricDecryptRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AsymmetricDecryptRequest) GetCiphertext() []byte { + if m != nil { + return m.Ciphertext + } + return nil +} + +// Response message for [KeyManagementService.Decrypt][google.cloud.kms.v1.KeyManagementService.Decrypt]. +type DecryptResponse struct { + // The decrypted data originally supplied in [EncryptRequest.plaintext][google.cloud.kms.v1.EncryptRequest.plaintext]. + Plaintext []byte `protobuf:"bytes,1,opt,name=plaintext,proto3" json:"plaintext,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DecryptResponse) Reset() { *m = DecryptResponse{} } +func (m *DecryptResponse) String() string { return proto.CompactTextString(m) } +func (*DecryptResponse) ProtoMessage() {} +func (*DecryptResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{19} +} +func (m *DecryptResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DecryptResponse.Unmarshal(m, b) +} +func (m *DecryptResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DecryptResponse.Marshal(b, m, deterministic) +} +func (dst *DecryptResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DecryptResponse.Merge(dst, src) +} +func (m *DecryptResponse) XXX_Size() int { + return xxx_messageInfo_DecryptResponse.Size(m) +} +func (m *DecryptResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DecryptResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DecryptResponse proto.InternalMessageInfo + +func (m *DecryptResponse) GetPlaintext() []byte { + if m != nil { + return m.Plaintext + } + return nil +} + +// Response message for [KeyManagementService.Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. +type EncryptResponse struct { + // The resource name of the [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] used in encryption. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The encrypted data. 
+ Ciphertext []byte `protobuf:"bytes,2,opt,name=ciphertext,proto3" json:"ciphertext,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EncryptResponse) Reset() { *m = EncryptResponse{} } +func (m *EncryptResponse) String() string { return proto.CompactTextString(m) } +func (*EncryptResponse) ProtoMessage() {} +func (*EncryptResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{20} +} +func (m *EncryptResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EncryptResponse.Unmarshal(m, b) +} +func (m *EncryptResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EncryptResponse.Marshal(b, m, deterministic) +} +func (dst *EncryptResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_EncryptResponse.Merge(dst, src) +} +func (m *EncryptResponse) XXX_Size() int { + return xxx_messageInfo_EncryptResponse.Size(m) +} +func (m *EncryptResponse) XXX_DiscardUnknown() { + xxx_messageInfo_EncryptResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_EncryptResponse proto.InternalMessageInfo + +func (m *EncryptResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *EncryptResponse) GetCiphertext() []byte { + if m != nil { + return m.Ciphertext + } + return nil +} + +// Response message for [KeyManagementService.AsymmetricSign][google.cloud.kms.v1.KeyManagementService.AsymmetricSign]. +type AsymmetricSignResponse struct { + // The created signature. + Signature []byte `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AsymmetricSignResponse) Reset() { *m = AsymmetricSignResponse{} } +func (m *AsymmetricSignResponse) String() string { return proto.CompactTextString(m) } +func (*AsymmetricSignResponse) ProtoMessage() {} +func (*AsymmetricSignResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{21} +} +func (m *AsymmetricSignResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AsymmetricSignResponse.Unmarshal(m, b) +} +func (m *AsymmetricSignResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AsymmetricSignResponse.Marshal(b, m, deterministic) +} +func (dst *AsymmetricSignResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AsymmetricSignResponse.Merge(dst, src) +} +func (m *AsymmetricSignResponse) XXX_Size() int { + return xxx_messageInfo_AsymmetricSignResponse.Size(m) +} +func (m *AsymmetricSignResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AsymmetricSignResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AsymmetricSignResponse proto.InternalMessageInfo + +func (m *AsymmetricSignResponse) GetSignature() []byte { + if m != nil { + return m.Signature + } + return nil +} + +// Response message for [KeyManagementService.AsymmetricDecrypt][google.cloud.kms.v1.KeyManagementService.AsymmetricDecrypt]. +type AsymmetricDecryptResponse struct { + // The decrypted data originally encrypted with the matching public key. 
+ Plaintext []byte `protobuf:"bytes,1,opt,name=plaintext,proto3" json:"plaintext,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AsymmetricDecryptResponse) Reset() { *m = AsymmetricDecryptResponse{} } +func (m *AsymmetricDecryptResponse) String() string { return proto.CompactTextString(m) } +func (*AsymmetricDecryptResponse) ProtoMessage() {} +func (*AsymmetricDecryptResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{22} +} +func (m *AsymmetricDecryptResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AsymmetricDecryptResponse.Unmarshal(m, b) +} +func (m *AsymmetricDecryptResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AsymmetricDecryptResponse.Marshal(b, m, deterministic) +} +func (dst *AsymmetricDecryptResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AsymmetricDecryptResponse.Merge(dst, src) +} +func (m *AsymmetricDecryptResponse) XXX_Size() int { + return xxx_messageInfo_AsymmetricDecryptResponse.Size(m) +} +func (m *AsymmetricDecryptResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AsymmetricDecryptResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AsymmetricDecryptResponse proto.InternalMessageInfo + +func (m *AsymmetricDecryptResponse) GetPlaintext() []byte { + if m != nil { + return m.Plaintext + } + return nil +} + +// Request message for [KeyManagementService.UpdateCryptoKeyPrimaryVersion][google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyPrimaryVersion]. +type UpdateCryptoKeyPrimaryVersionRequest struct { + // The resource name of the [CryptoKey][google.cloud.kms.v1.CryptoKey] to update. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The id of the child [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to use as primary. 
+ CryptoKeyVersionId string `protobuf:"bytes,2,opt,name=crypto_key_version_id,json=cryptoKeyVersionId,proto3" json:"crypto_key_version_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateCryptoKeyPrimaryVersionRequest) Reset() { *m = UpdateCryptoKeyPrimaryVersionRequest{} } +func (m *UpdateCryptoKeyPrimaryVersionRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateCryptoKeyPrimaryVersionRequest) ProtoMessage() {} +func (*UpdateCryptoKeyPrimaryVersionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{23} +} +func (m *UpdateCryptoKeyPrimaryVersionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateCryptoKeyPrimaryVersionRequest.Unmarshal(m, b) +} +func (m *UpdateCryptoKeyPrimaryVersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateCryptoKeyPrimaryVersionRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateCryptoKeyPrimaryVersionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateCryptoKeyPrimaryVersionRequest.Merge(dst, src) +} +func (m *UpdateCryptoKeyPrimaryVersionRequest) XXX_Size() int { + return xxx_messageInfo_UpdateCryptoKeyPrimaryVersionRequest.Size(m) +} +func (m *UpdateCryptoKeyPrimaryVersionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateCryptoKeyPrimaryVersionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateCryptoKeyPrimaryVersionRequest proto.InternalMessageInfo + +func (m *UpdateCryptoKeyPrimaryVersionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UpdateCryptoKeyPrimaryVersionRequest) GetCryptoKeyVersionId() string { + if m != nil { + return m.CryptoKeyVersionId + } + return "" +} + +// Request message for [KeyManagementService.DestroyCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion]. +type DestroyCryptoKeyVersionRequest struct { + // The resource name of the [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to destroy. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DestroyCryptoKeyVersionRequest) Reset() { *m = DestroyCryptoKeyVersionRequest{} } +func (m *DestroyCryptoKeyVersionRequest) String() string { return proto.CompactTextString(m) } +func (*DestroyCryptoKeyVersionRequest) ProtoMessage() {} +func (*DestroyCryptoKeyVersionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{24} +} +func (m *DestroyCryptoKeyVersionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DestroyCryptoKeyVersionRequest.Unmarshal(m, b) +} +func (m *DestroyCryptoKeyVersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DestroyCryptoKeyVersionRequest.Marshal(b, m, deterministic) +} +func (dst *DestroyCryptoKeyVersionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DestroyCryptoKeyVersionRequest.Merge(dst, src) +} +func (m *DestroyCryptoKeyVersionRequest) XXX_Size() int { + return xxx_messageInfo_DestroyCryptoKeyVersionRequest.Size(m) +} +func (m *DestroyCryptoKeyVersionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DestroyCryptoKeyVersionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DestroyCryptoKeyVersionRequest proto.InternalMessageInfo + +func (m *DestroyCryptoKeyVersionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for [KeyManagementService.RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion]. +type RestoreCryptoKeyVersionRequest struct { + // The resource name of the [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to restore. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RestoreCryptoKeyVersionRequest) Reset() { *m = RestoreCryptoKeyVersionRequest{} } +func (m *RestoreCryptoKeyVersionRequest) String() string { return proto.CompactTextString(m) } +func (*RestoreCryptoKeyVersionRequest) ProtoMessage() {} +func (*RestoreCryptoKeyVersionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{25} +} +func (m *RestoreCryptoKeyVersionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RestoreCryptoKeyVersionRequest.Unmarshal(m, b) +} +func (m *RestoreCryptoKeyVersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RestoreCryptoKeyVersionRequest.Marshal(b, m, deterministic) +} +func (dst *RestoreCryptoKeyVersionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RestoreCryptoKeyVersionRequest.Merge(dst, src) +} +func (m *RestoreCryptoKeyVersionRequest) XXX_Size() int { + return xxx_messageInfo_RestoreCryptoKeyVersionRequest.Size(m) +} +func (m *RestoreCryptoKeyVersionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RestoreCryptoKeyVersionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RestoreCryptoKeyVersionRequest proto.InternalMessageInfo + +func (m *RestoreCryptoKeyVersionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// A [Digest][google.cloud.kms.v1.Digest] holds a cryptographic message digest. +type Digest struct { + // Required. The message digest. 
+ // + // Types that are valid to be assigned to Digest: + // *Digest_Sha256 + // *Digest_Sha384 + // *Digest_Sha512 + Digest isDigest_Digest `protobuf_oneof:"digest"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Digest) Reset() { *m = Digest{} } +func (m *Digest) String() string { return proto.CompactTextString(m) } +func (*Digest) ProtoMessage() {} +func (*Digest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{26} +} +func (m *Digest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Digest.Unmarshal(m, b) +} +func (m *Digest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Digest.Marshal(b, m, deterministic) +} +func (dst *Digest) XXX_Merge(src proto.Message) { + xxx_messageInfo_Digest.Merge(dst, src) +} +func (m *Digest) XXX_Size() int { + return xxx_messageInfo_Digest.Size(m) +} +func (m *Digest) XXX_DiscardUnknown() { + xxx_messageInfo_Digest.DiscardUnknown(m) +} + +var xxx_messageInfo_Digest proto.InternalMessageInfo + +type isDigest_Digest interface { + isDigest_Digest() +} + +type Digest_Sha256 struct { + Sha256 []byte `protobuf:"bytes,1,opt,name=sha256,proto3,oneof"` +} + +type Digest_Sha384 struct { + Sha384 []byte `protobuf:"bytes,2,opt,name=sha384,proto3,oneof"` +} + +type Digest_Sha512 struct { + Sha512 []byte `protobuf:"bytes,3,opt,name=sha512,proto3,oneof"` +} + +func (*Digest_Sha256) isDigest_Digest() {} + +func (*Digest_Sha384) isDigest_Digest() {} + +func (*Digest_Sha512) isDigest_Digest() {} + +func (m *Digest) GetDigest() isDigest_Digest { + if m != nil { + return m.Digest + } + return nil +} + +func (m *Digest) GetSha256() []byte { + if x, ok := m.GetDigest().(*Digest_Sha256); ok { + return x.Sha256 + } + return nil +} + +func (m *Digest) GetSha384() []byte { + if x, ok := m.GetDigest().(*Digest_Sha384); ok { + return x.Sha384 + } + return nil +} + +func (m *Digest) GetSha512() []byte { + if x, ok := m.GetDigest().(*Digest_Sha512); ok { + return x.Sha512 + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
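Not part of the generated file or of this diff hunk: a minimal sketch of populating the Digest oneof shown above for AsymmetricSign, assuming the generated package is imported as kmspb (conventionally google.golang.org/genproto/googleapis/cloud/kms/v1) and a key version whose algorithm is SHA-256 based; use the Digest_Sha384 or Digest_Sha512 wrappers when the key version's algorithm requires them. signWithSHA256 is an illustrative helper name.

package kmsexample

import (
	"context"
	"crypto/sha256"

	kmspb "google.golang.org/genproto/googleapis/cloud/kms/v1"
)

// signWithSHA256 hashes data locally and asks KMS to sign the digest with the named
// CryptoKeyVersion (projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*).
func signWithSHA256(ctx context.Context, client kmspb.KeyManagementServiceClient, keyVersionName string, data []byte) ([]byte, error) {
	sum := sha256.Sum256(data)
	req := &kmspb.AsymmetricSignRequest{
		Name: keyVersionName,
		// The oneof is set by assigning one of the generated wrapper types.
		Digest: &kmspb.Digest{Digest: &kmspb.Digest_Sha256{Sha256: sum[:]}},
	}
	resp, err := client.AsymmetricSign(ctx, req)
	if err != nil {
		return nil, err
	}
	return resp.GetSignature(), nil
}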
+func (*Digest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Digest_OneofMarshaler, _Digest_OneofUnmarshaler, _Digest_OneofSizer, []interface{}{ + (*Digest_Sha256)(nil), + (*Digest_Sha384)(nil), + (*Digest_Sha512)(nil), + } +} + +func _Digest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Digest) + // digest + switch x := m.Digest.(type) { + case *Digest_Sha256: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Sha256) + case *Digest_Sha384: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Sha384) + case *Digest_Sha512: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Sha512) + case nil: + default: + return fmt.Errorf("Digest.Digest has unexpected type %T", x) + } + return nil +} + +func _Digest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Digest) + switch tag { + case 1: // digest.sha256 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Digest = &Digest_Sha256{x} + return true, err + case 2: // digest.sha384 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Digest = &Digest_Sha384{x} + return true, err + case 3: // digest.sha512 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Digest = &Digest_Sha512{x} + return true, err + default: + return false, nil + } +} + +func _Digest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Digest) + // digest + switch x := m.Digest.(type) { + case *Digest_Sha256: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Sha256))) + n += len(x.Sha256) + case *Digest_Sha384: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Sha384))) + n += len(x.Sha384) + case *Digest_Sha512: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Sha512))) + n += len(x.Sha512) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Cloud KMS metadata for the given [google.cloud.location.Location][google.cloud.location.Location]. +type LocationMetadata struct { + // Indicates whether [CryptoKeys][google.cloud.kms.v1.CryptoKey] with + // [protection_level][google.cloud.kms.v1.CryptoKeyVersionTemplate.protection_level] + // [HSM][google.cloud.kms.v1.ProtectionLevel.HSM] can be created in this location. 
+ HsmAvailable bool `protobuf:"varint,1,opt,name=hsm_available,json=hsmAvailable,proto3" json:"hsm_available,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LocationMetadata) Reset() { *m = LocationMetadata{} } +func (m *LocationMetadata) String() string { return proto.CompactTextString(m) } +func (*LocationMetadata) ProtoMessage() {} +func (*LocationMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_service_799861e3104b1d84, []int{27} +} +func (m *LocationMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LocationMetadata.Unmarshal(m, b) +} +func (m *LocationMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LocationMetadata.Marshal(b, m, deterministic) +} +func (dst *LocationMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocationMetadata.Merge(dst, src) +} +func (m *LocationMetadata) XXX_Size() int { + return xxx_messageInfo_LocationMetadata.Size(m) +} +func (m *LocationMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_LocationMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_LocationMetadata proto.InternalMessageInfo + +func (m *LocationMetadata) GetHsmAvailable() bool { + if m != nil { + return m.HsmAvailable + } + return false +} + +func init() { + proto.RegisterType((*ListKeyRingsRequest)(nil), "google.cloud.kms.v1.ListKeyRingsRequest") + proto.RegisterType((*ListCryptoKeysRequest)(nil), "google.cloud.kms.v1.ListCryptoKeysRequest") + proto.RegisterType((*ListCryptoKeyVersionsRequest)(nil), "google.cloud.kms.v1.ListCryptoKeyVersionsRequest") + proto.RegisterType((*ListKeyRingsResponse)(nil), "google.cloud.kms.v1.ListKeyRingsResponse") + proto.RegisterType((*ListCryptoKeysResponse)(nil), "google.cloud.kms.v1.ListCryptoKeysResponse") + proto.RegisterType((*ListCryptoKeyVersionsResponse)(nil), "google.cloud.kms.v1.ListCryptoKeyVersionsResponse") + proto.RegisterType((*GetKeyRingRequest)(nil), "google.cloud.kms.v1.GetKeyRingRequest") + proto.RegisterType((*GetCryptoKeyRequest)(nil), "google.cloud.kms.v1.GetCryptoKeyRequest") + proto.RegisterType((*GetCryptoKeyVersionRequest)(nil), "google.cloud.kms.v1.GetCryptoKeyVersionRequest") + proto.RegisterType((*GetPublicKeyRequest)(nil), "google.cloud.kms.v1.GetPublicKeyRequest") + proto.RegisterType((*CreateKeyRingRequest)(nil), "google.cloud.kms.v1.CreateKeyRingRequest") + proto.RegisterType((*CreateCryptoKeyRequest)(nil), "google.cloud.kms.v1.CreateCryptoKeyRequest") + proto.RegisterType((*CreateCryptoKeyVersionRequest)(nil), "google.cloud.kms.v1.CreateCryptoKeyVersionRequest") + proto.RegisterType((*UpdateCryptoKeyRequest)(nil), "google.cloud.kms.v1.UpdateCryptoKeyRequest") + proto.RegisterType((*UpdateCryptoKeyVersionRequest)(nil), "google.cloud.kms.v1.UpdateCryptoKeyVersionRequest") + proto.RegisterType((*EncryptRequest)(nil), "google.cloud.kms.v1.EncryptRequest") + proto.RegisterType((*DecryptRequest)(nil), "google.cloud.kms.v1.DecryptRequest") + proto.RegisterType((*AsymmetricSignRequest)(nil), "google.cloud.kms.v1.AsymmetricSignRequest") + proto.RegisterType((*AsymmetricDecryptRequest)(nil), "google.cloud.kms.v1.AsymmetricDecryptRequest") + proto.RegisterType((*DecryptResponse)(nil), "google.cloud.kms.v1.DecryptResponse") + proto.RegisterType((*EncryptResponse)(nil), "google.cloud.kms.v1.EncryptResponse") + proto.RegisterType((*AsymmetricSignResponse)(nil), "google.cloud.kms.v1.AsymmetricSignResponse") + proto.RegisterType((*AsymmetricDecryptResponse)(nil), 
"google.cloud.kms.v1.AsymmetricDecryptResponse") + proto.RegisterType((*UpdateCryptoKeyPrimaryVersionRequest)(nil), "google.cloud.kms.v1.UpdateCryptoKeyPrimaryVersionRequest") + proto.RegisterType((*DestroyCryptoKeyVersionRequest)(nil), "google.cloud.kms.v1.DestroyCryptoKeyVersionRequest") + proto.RegisterType((*RestoreCryptoKeyVersionRequest)(nil), "google.cloud.kms.v1.RestoreCryptoKeyVersionRequest") + proto.RegisterType((*Digest)(nil), "google.cloud.kms.v1.Digest") + proto.RegisterType((*LocationMetadata)(nil), "google.cloud.kms.v1.LocationMetadata") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// KeyManagementServiceClient is the client API for KeyManagementService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type KeyManagementServiceClient interface { + // Lists [KeyRings][google.cloud.kms.v1.KeyRing]. + ListKeyRings(ctx context.Context, in *ListKeyRingsRequest, opts ...grpc.CallOption) (*ListKeyRingsResponse, error) + // Lists [CryptoKeys][google.cloud.kms.v1.CryptoKey]. + ListCryptoKeys(ctx context.Context, in *ListCryptoKeysRequest, opts ...grpc.CallOption) (*ListCryptoKeysResponse, error) + // Lists [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion]. + ListCryptoKeyVersions(ctx context.Context, in *ListCryptoKeyVersionsRequest, opts ...grpc.CallOption) (*ListCryptoKeyVersionsResponse, error) + // Returns metadata for a given [KeyRing][google.cloud.kms.v1.KeyRing]. + GetKeyRing(ctx context.Context, in *GetKeyRingRequest, opts ...grpc.CallOption) (*KeyRing, error) + // Returns metadata for a given [CryptoKey][google.cloud.kms.v1.CryptoKey], as well as its + // [primary][google.cloud.kms.v1.CryptoKey.primary] [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. + GetCryptoKey(ctx context.Context, in *GetCryptoKeyRequest, opts ...grpc.CallOption) (*CryptoKey, error) + // Returns metadata for a given [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. + GetCryptoKeyVersion(ctx context.Context, in *GetCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) + // Returns the public key for the given [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. The + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must be + // [ASYMMETRIC_SIGN][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN] or + // [ASYMMETRIC_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_DECRYPT]. + GetPublicKey(ctx context.Context, in *GetPublicKeyRequest, opts ...grpc.CallOption) (*PublicKey, error) + // Create a new [KeyRing][google.cloud.kms.v1.KeyRing] in a given Project and Location. + CreateKeyRing(ctx context.Context, in *CreateKeyRingRequest, opts ...grpc.CallOption) (*KeyRing, error) + // Create a new [CryptoKey][google.cloud.kms.v1.CryptoKey] within a [KeyRing][google.cloud.kms.v1.KeyRing]. + // + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] and + // [CryptoKey.version_template.algorithm][google.cloud.kms.v1.CryptoKeyVersionTemplate.algorithm] + // are required. 
+ CreateCryptoKey(ctx context.Context, in *CreateCryptoKeyRequest, opts ...grpc.CallOption) (*CryptoKey, error) + // Create a new [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in a [CryptoKey][google.cloud.kms.v1.CryptoKey]. + // + // The server will assign the next sequential id. If unset, + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] will be set to + // [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED]. + CreateCryptoKeyVersion(ctx context.Context, in *CreateCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) + // Update a [CryptoKey][google.cloud.kms.v1.CryptoKey]. + UpdateCryptoKey(ctx context.Context, in *UpdateCryptoKeyRequest, opts ...grpc.CallOption) (*CryptoKey, error) + // Update a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]'s metadata. + // + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] may be changed between + // [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED] and + // [DISABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DISABLED] using this + // method. See [DestroyCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion] and [RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion] to + // move between other states. + UpdateCryptoKeyVersion(ctx context.Context, in *UpdateCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) + // Encrypts data, so that it can only be recovered by a call to [Decrypt][google.cloud.kms.v1.KeyManagementService.Decrypt]. + // The [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must be + // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. + Encrypt(ctx context.Context, in *EncryptRequest, opts ...grpc.CallOption) (*EncryptResponse, error) + // Decrypts data that was protected by [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. The [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] + // must be [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. + Decrypt(ctx context.Context, in *DecryptRequest, opts ...grpc.CallOption) (*DecryptResponse, error) + // Signs data using a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] + // ASYMMETRIC_SIGN, producing a signature that can be verified with the public + // key retrieved from [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]. + AsymmetricSign(ctx context.Context, in *AsymmetricSignRequest, opts ...grpc.CallOption) (*AsymmetricSignResponse, error) + // Decrypts data that was encrypted with a public key retrieved from + // [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey] corresponding to a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] ASYMMETRIC_DECRYPT. + AsymmetricDecrypt(ctx context.Context, in *AsymmetricDecryptRequest, opts ...grpc.CallOption) (*AsymmetricDecryptResponse, error) + // Update the version of a [CryptoKey][google.cloud.kms.v1.CryptoKey] that will be used in [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. + // + // Returns an error if called on an asymmetric key. 
+ UpdateCryptoKeyPrimaryVersion(ctx context.Context, in *UpdateCryptoKeyPrimaryVersionRequest, opts ...grpc.CallOption) (*CryptoKey, error) + // Schedule a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] for destruction. + // + // Upon calling this method, [CryptoKeyVersion.state][google.cloud.kms.v1.CryptoKeyVersion.state] will be set to + // [DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED] + // and [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] will be set to a time 24 + // hours in the future, at which point the [state][google.cloud.kms.v1.CryptoKeyVersion.state] + // will be changed to + // [DESTROYED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROYED], and the key + // material will be irrevocably destroyed. + // + // Before the [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] is reached, + // [RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion] may be called to reverse the process. + DestroyCryptoKeyVersion(ctx context.Context, in *DestroyCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) + // Restore a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in the + // [DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED] + // state. + // + // Upon restoration of the CryptoKeyVersion, [state][google.cloud.kms.v1.CryptoKeyVersion.state] + // will be set to [DISABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DISABLED], + // and [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] will be cleared. + RestoreCryptoKeyVersion(ctx context.Context, in *RestoreCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) +} + +type keyManagementServiceClient struct { + cc *grpc.ClientConn +} + +func NewKeyManagementServiceClient(cc *grpc.ClientConn) KeyManagementServiceClient { + return &keyManagementServiceClient{cc} +} + +func (c *keyManagementServiceClient) ListKeyRings(ctx context.Context, in *ListKeyRingsRequest, opts ...grpc.CallOption) (*ListKeyRingsResponse, error) { + out := new(ListKeyRingsResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/ListKeyRings", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) ListCryptoKeys(ctx context.Context, in *ListCryptoKeysRequest, opts ...grpc.CallOption) (*ListCryptoKeysResponse, error) { + out := new(ListCryptoKeysResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/ListCryptoKeys", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) ListCryptoKeyVersions(ctx context.Context, in *ListCryptoKeyVersionsRequest, opts ...grpc.CallOption) (*ListCryptoKeyVersionsResponse, error) { + out := new(ListCryptoKeyVersionsResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/ListCryptoKeyVersions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) GetKeyRing(ctx context.Context, in *GetKeyRingRequest, opts ...grpc.CallOption) (*KeyRing, error) { + out := new(KeyRing) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/GetKeyRing", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) GetCryptoKey(ctx context.Context, in *GetCryptoKeyRequest, opts ...grpc.CallOption) (*CryptoKey, error) { + out := new(CryptoKey) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/GetCryptoKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) GetCryptoKeyVersion(ctx context.Context, in *GetCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) { + out := new(CryptoKeyVersion) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/GetCryptoKeyVersion", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) GetPublicKey(ctx context.Context, in *GetPublicKeyRequest, opts ...grpc.CallOption) (*PublicKey, error) { + out := new(PublicKey) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/GetPublicKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) CreateKeyRing(ctx context.Context, in *CreateKeyRingRequest, opts ...grpc.CallOption) (*KeyRing, error) { + out := new(KeyRing) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/CreateKeyRing", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) CreateCryptoKey(ctx context.Context, in *CreateCryptoKeyRequest, opts ...grpc.CallOption) (*CryptoKey, error) { + out := new(CryptoKey) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/CreateCryptoKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) CreateCryptoKeyVersion(ctx context.Context, in *CreateCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) { + out := new(CryptoKeyVersion) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/CreateCryptoKeyVersion", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) UpdateCryptoKey(ctx context.Context, in *UpdateCryptoKeyRequest, opts ...grpc.CallOption) (*CryptoKey, error) { + out := new(CryptoKey) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/UpdateCryptoKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) UpdateCryptoKeyVersion(ctx context.Context, in *UpdateCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) { + out := new(CryptoKeyVersion) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/UpdateCryptoKeyVersion", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) Encrypt(ctx context.Context, in *EncryptRequest, opts ...grpc.CallOption) (*EncryptResponse, error) { + out := new(EncryptResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/Encrypt", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) Decrypt(ctx context.Context, in *DecryptRequest, opts ...grpc.CallOption) (*DecryptResponse, error) { + out := new(DecryptResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/Decrypt", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) AsymmetricSign(ctx context.Context, in *AsymmetricSignRequest, opts ...grpc.CallOption) (*AsymmetricSignResponse, error) { + out := new(AsymmetricSignResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/AsymmetricSign", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) AsymmetricDecrypt(ctx context.Context, in *AsymmetricDecryptRequest, opts ...grpc.CallOption) (*AsymmetricDecryptResponse, error) { + out := new(AsymmetricDecryptResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/AsymmetricDecrypt", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) UpdateCryptoKeyPrimaryVersion(ctx context.Context, in *UpdateCryptoKeyPrimaryVersionRequest, opts ...grpc.CallOption) (*CryptoKey, error) { + out := new(CryptoKey) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/UpdateCryptoKeyPrimaryVersion", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) DestroyCryptoKeyVersion(ctx context.Context, in *DestroyCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) { + out := new(CryptoKeyVersion) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/DestroyCryptoKeyVersion", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) RestoreCryptoKeyVersion(ctx context.Context, in *RestoreCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) { + out := new(CryptoKeyVersion) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/RestoreCryptoKeyVersion", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// KeyManagementServiceServer is the server API for KeyManagementService service. +type KeyManagementServiceServer interface { + // Lists [KeyRings][google.cloud.kms.v1.KeyRing]. + ListKeyRings(context.Context, *ListKeyRingsRequest) (*ListKeyRingsResponse, error) + // Lists [CryptoKeys][google.cloud.kms.v1.CryptoKey]. + ListCryptoKeys(context.Context, *ListCryptoKeysRequest) (*ListCryptoKeysResponse, error) + // Lists [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion]. + ListCryptoKeyVersions(context.Context, *ListCryptoKeyVersionsRequest) (*ListCryptoKeyVersionsResponse, error) + // Returns metadata for a given [KeyRing][google.cloud.kms.v1.KeyRing]. + GetKeyRing(context.Context, *GetKeyRingRequest) (*KeyRing, error) + // Returns metadata for a given [CryptoKey][google.cloud.kms.v1.CryptoKey], as well as its + // [primary][google.cloud.kms.v1.CryptoKey.primary] [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. + GetCryptoKey(context.Context, *GetCryptoKeyRequest) (*CryptoKey, error) + // Returns metadata for a given [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. + GetCryptoKeyVersion(context.Context, *GetCryptoKeyVersionRequest) (*CryptoKeyVersion, error) + // Returns the public key for the given [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. The + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must be + // [ASYMMETRIC_SIGN][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN] or + // [ASYMMETRIC_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_DECRYPT]. 
+ GetPublicKey(context.Context, *GetPublicKeyRequest) (*PublicKey, error) + // Create a new [KeyRing][google.cloud.kms.v1.KeyRing] in a given Project and Location. + CreateKeyRing(context.Context, *CreateKeyRingRequest) (*KeyRing, error) + // Create a new [CryptoKey][google.cloud.kms.v1.CryptoKey] within a [KeyRing][google.cloud.kms.v1.KeyRing]. + // + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] and + // [CryptoKey.version_template.algorithm][google.cloud.kms.v1.CryptoKeyVersionTemplate.algorithm] + // are required. + CreateCryptoKey(context.Context, *CreateCryptoKeyRequest) (*CryptoKey, error) + // Create a new [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in a [CryptoKey][google.cloud.kms.v1.CryptoKey]. + // + // The server will assign the next sequential id. If unset, + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] will be set to + // [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED]. + CreateCryptoKeyVersion(context.Context, *CreateCryptoKeyVersionRequest) (*CryptoKeyVersion, error) + // Update a [CryptoKey][google.cloud.kms.v1.CryptoKey]. + UpdateCryptoKey(context.Context, *UpdateCryptoKeyRequest) (*CryptoKey, error) + // Update a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]'s metadata. + // + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] may be changed between + // [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED] and + // [DISABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DISABLED] using this + // method. See [DestroyCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion] and [RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion] to + // move between other states. + UpdateCryptoKeyVersion(context.Context, *UpdateCryptoKeyVersionRequest) (*CryptoKeyVersion, error) + // Encrypts data, so that it can only be recovered by a call to [Decrypt][google.cloud.kms.v1.KeyManagementService.Decrypt]. + // The [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must be + // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. + Encrypt(context.Context, *EncryptRequest) (*EncryptResponse, error) + // Decrypts data that was protected by [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. The [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] + // must be [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. + Decrypt(context.Context, *DecryptRequest) (*DecryptResponse, error) + // Signs data using a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] + // ASYMMETRIC_SIGN, producing a signature that can be verified with the public + // key retrieved from [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]. + AsymmetricSign(context.Context, *AsymmetricSignRequest) (*AsymmetricSignResponse, error) + // Decrypts data that was encrypted with a public key retrieved from + // [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey] corresponding to a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] ASYMMETRIC_DECRYPT. 
+ AsymmetricDecrypt(context.Context, *AsymmetricDecryptRequest) (*AsymmetricDecryptResponse, error) + // Update the version of a [CryptoKey][google.cloud.kms.v1.CryptoKey] that will be used in [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. + // + // Returns an error if called on an asymmetric key. + UpdateCryptoKeyPrimaryVersion(context.Context, *UpdateCryptoKeyPrimaryVersionRequest) (*CryptoKey, error) + // Schedule a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] for destruction. + // + // Upon calling this method, [CryptoKeyVersion.state][google.cloud.kms.v1.CryptoKeyVersion.state] will be set to + // [DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED] + // and [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] will be set to a time 24 + // hours in the future, at which point the [state][google.cloud.kms.v1.CryptoKeyVersion.state] + // will be changed to + // [DESTROYED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROYED], and the key + // material will be irrevocably destroyed. + // + // Before the [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] is reached, + // [RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion] may be called to reverse the process. + DestroyCryptoKeyVersion(context.Context, *DestroyCryptoKeyVersionRequest) (*CryptoKeyVersion, error) + // Restore a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in the + // [DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED] + // state. + // + // Upon restoration of the CryptoKeyVersion, [state][google.cloud.kms.v1.CryptoKeyVersion.state] + // will be set to [DISABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DISABLED], + // and [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] will be cleared. 
+ RestoreCryptoKeyVersion(context.Context, *RestoreCryptoKeyVersionRequest) (*CryptoKeyVersion, error) +} + +func RegisterKeyManagementServiceServer(s *grpc.Server, srv KeyManagementServiceServer) { + s.RegisterService(&_KeyManagementService_serviceDesc, srv) +} + +func _KeyManagementService_ListKeyRings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListKeyRingsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).ListKeyRings(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/ListKeyRings", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).ListKeyRings(ctx, req.(*ListKeyRingsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_ListCryptoKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListCryptoKeysRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).ListCryptoKeys(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/ListCryptoKeys", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).ListCryptoKeys(ctx, req.(*ListCryptoKeysRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_ListCryptoKeyVersions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListCryptoKeyVersionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).ListCryptoKeyVersions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/ListCryptoKeyVersions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).ListCryptoKeyVersions(ctx, req.(*ListCryptoKeyVersionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_GetKeyRing_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetKeyRingRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).GetKeyRing(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/GetKeyRing", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).GetKeyRing(ctx, req.(*GetKeyRingRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_GetCryptoKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetCryptoKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).GetCryptoKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + 
Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/GetCryptoKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).GetCryptoKey(ctx, req.(*GetCryptoKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_GetCryptoKeyVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetCryptoKeyVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).GetCryptoKeyVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/GetCryptoKeyVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).GetCryptoKeyVersion(ctx, req.(*GetCryptoKeyVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_GetPublicKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPublicKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).GetPublicKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/GetPublicKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).GetPublicKey(ctx, req.(*GetPublicKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_CreateKeyRing_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateKeyRingRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).CreateKeyRing(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/CreateKeyRing", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).CreateKeyRing(ctx, req.(*CreateKeyRingRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_CreateCryptoKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateCryptoKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).CreateCryptoKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/CreateCryptoKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).CreateCryptoKey(ctx, req.(*CreateCryptoKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_CreateCryptoKeyVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateCryptoKeyVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(KeyManagementServiceServer).CreateCryptoKeyVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/CreateCryptoKeyVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).CreateCryptoKeyVersion(ctx, req.(*CreateCryptoKeyVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_UpdateCryptoKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateCryptoKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).UpdateCryptoKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/UpdateCryptoKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).UpdateCryptoKey(ctx, req.(*UpdateCryptoKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_UpdateCryptoKeyVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateCryptoKeyVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).UpdateCryptoKeyVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/UpdateCryptoKeyVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).UpdateCryptoKeyVersion(ctx, req.(*UpdateCryptoKeyVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_Encrypt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EncryptRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).Encrypt(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/Encrypt", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).Encrypt(ctx, req.(*EncryptRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_Decrypt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DecryptRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).Decrypt(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/Decrypt", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).Decrypt(ctx, req.(*DecryptRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_AsymmetricSign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AsymmetricSignRequest) + if err := dec(in); err != nil { + return 
nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).AsymmetricSign(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/AsymmetricSign", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).AsymmetricSign(ctx, req.(*AsymmetricSignRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_AsymmetricDecrypt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AsymmetricDecryptRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).AsymmetricDecrypt(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/AsymmetricDecrypt", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).AsymmetricDecrypt(ctx, req.(*AsymmetricDecryptRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_UpdateCryptoKeyPrimaryVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateCryptoKeyPrimaryVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).UpdateCryptoKeyPrimaryVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/UpdateCryptoKeyPrimaryVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).UpdateCryptoKeyPrimaryVersion(ctx, req.(*UpdateCryptoKeyPrimaryVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_DestroyCryptoKeyVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DestroyCryptoKeyVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).DestroyCryptoKeyVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/DestroyCryptoKeyVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).DestroyCryptoKeyVersion(ctx, req.(*DestroyCryptoKeyVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_RestoreCryptoKeyVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RestoreCryptoKeyVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).RestoreCryptoKeyVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/RestoreCryptoKeyVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).RestoreCryptoKeyVersion(ctx, req.(*RestoreCryptoKeyVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} 
+ +var _KeyManagementService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.kms.v1.KeyManagementService", + HandlerType: (*KeyManagementServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListKeyRings", + Handler: _KeyManagementService_ListKeyRings_Handler, + }, + { + MethodName: "ListCryptoKeys", + Handler: _KeyManagementService_ListCryptoKeys_Handler, + }, + { + MethodName: "ListCryptoKeyVersions", + Handler: _KeyManagementService_ListCryptoKeyVersions_Handler, + }, + { + MethodName: "GetKeyRing", + Handler: _KeyManagementService_GetKeyRing_Handler, + }, + { + MethodName: "GetCryptoKey", + Handler: _KeyManagementService_GetCryptoKey_Handler, + }, + { + MethodName: "GetCryptoKeyVersion", + Handler: _KeyManagementService_GetCryptoKeyVersion_Handler, + }, + { + MethodName: "GetPublicKey", + Handler: _KeyManagementService_GetPublicKey_Handler, + }, + { + MethodName: "CreateKeyRing", + Handler: _KeyManagementService_CreateKeyRing_Handler, + }, + { + MethodName: "CreateCryptoKey", + Handler: _KeyManagementService_CreateCryptoKey_Handler, + }, + { + MethodName: "CreateCryptoKeyVersion", + Handler: _KeyManagementService_CreateCryptoKeyVersion_Handler, + }, + { + MethodName: "UpdateCryptoKey", + Handler: _KeyManagementService_UpdateCryptoKey_Handler, + }, + { + MethodName: "UpdateCryptoKeyVersion", + Handler: _KeyManagementService_UpdateCryptoKeyVersion_Handler, + }, + { + MethodName: "Encrypt", + Handler: _KeyManagementService_Encrypt_Handler, + }, + { + MethodName: "Decrypt", + Handler: _KeyManagementService_Decrypt_Handler, + }, + { + MethodName: "AsymmetricSign", + Handler: _KeyManagementService_AsymmetricSign_Handler, + }, + { + MethodName: "AsymmetricDecrypt", + Handler: _KeyManagementService_AsymmetricDecrypt_Handler, + }, + { + MethodName: "UpdateCryptoKeyPrimaryVersion", + Handler: _KeyManagementService_UpdateCryptoKeyPrimaryVersion_Handler, + }, + { + MethodName: "DestroyCryptoKeyVersion", + Handler: _KeyManagementService_DestroyCryptoKeyVersion_Handler, + }, + { + MethodName: "RestoreCryptoKeyVersion", + Handler: _KeyManagementService_RestoreCryptoKeyVersion_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/kms/v1/service.proto", +} + +func init() { + proto.RegisterFile("google/cloud/kms/v1/service.proto", fileDescriptor_service_799861e3104b1d84) +} + +var fileDescriptor_service_799861e3104b1d84 = []byte{ + // 1666 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x73, 0x1b, 0xc5, + 0x12, 0x7f, 0x63, 0xe7, 0x39, 0x76, 0xfb, 0x2b, 0x19, 0x27, 0x8e, 0x9f, 0x62, 0xbb, 0xfc, 0x26, + 0x79, 0x79, 0x8e, 0x03, 0x12, 0x92, 0x9d, 0x0f, 0x2b, 0x15, 0x28, 0x3b, 0x4e, 0x4c, 0xca, 0x71, + 0x70, 0xad, 0xb1, 0x21, 0x29, 0x53, 0xf2, 0x58, 0x3b, 0x91, 0x37, 0xd2, 0xee, 0x8a, 0xdd, 0x95, + 0x12, 0x05, 0x72, 0xe1, 0x00, 0x39, 0xe4, 0x16, 0x0e, 0xa1, 0xb8, 0x50, 0x70, 0xa3, 0xe0, 0xc0, + 0x0d, 0x0e, 0x5c, 0x28, 0x4e, 0xa9, 0xe2, 0x02, 0x95, 0xe2, 0x96, 0x13, 0xff, 0x00, 0x37, 0x8e, + 0xd4, 0xce, 0xce, 0xae, 0xb4, 0xab, 0x5d, 0x7d, 0x45, 0x29, 0x6e, 0xde, 0x99, 0x9e, 0x9e, 0xdf, + 0xaf, 0xfb, 0x37, 0x33, 0xdd, 0x32, 0xfc, 0x37, 0xa7, 0xeb, 0xb9, 0x02, 0x4b, 0x64, 0x0b, 0x7a, + 0x49, 0x4e, 0xe4, 0x55, 0x33, 0x51, 0x4e, 0x26, 0x4c, 0x66, 0x94, 0x95, 0x2c, 0x8b, 0x17, 0x0d, + 0xdd, 0xd2, 0xf1, 0x98, 0x63, 0x12, 0xe7, 0x26, 0xf1, 0xbc, 0x6a, 0xc6, 0xcb, 0xc9, 0xd8, 0xa4, + 0x58, 0x47, 0x8b, 0x4a, 0x82, 0x6a, 0x9a, 0x6e, 0x51, 0x4b, 0xd1, 0x35, 0xd3, 0x59, 0x12, 0x3b, + 0x11, 0xe6, 
0xd5, 0x60, 0xa6, 0x5e, 0x32, 0xb2, 0xcc, 0x35, 0x9a, 0x11, 0x46, 0xfc, 0x6b, 0xaf, + 0x74, 0x3b, 0x71, 0x5b, 0x61, 0x05, 0x39, 0xa3, 0x52, 0x33, 0x2f, 0x2c, 0x26, 0x83, 0x16, 0xa6, + 0x65, 0x94, 0xb2, 0x96, 0x98, 0x9d, 0x0e, 0xce, 0xde, 0x35, 0x68, 0xb1, 0xc8, 0x0c, 0xe1, 0x9f, + 0x28, 0x30, 0x76, 0x5d, 0x31, 0xad, 0x35, 0x56, 0x91, 0x14, 0x2d, 0x67, 0x4a, 0xec, 0xfd, 0x12, + 0x33, 0x2d, 0x3c, 0x0e, 0x7d, 0x45, 0x6a, 0x30, 0xcd, 0x9a, 0x40, 0x33, 0x68, 0x76, 0x40, 0x12, + 0x5f, 0xf8, 0x38, 0x0c, 0x14, 0x69, 0x8e, 0x65, 0x4c, 0xe5, 0x3e, 0x9b, 0xe8, 0x99, 0x41, 0xb3, + 0xff, 0x96, 0xfa, 0xed, 0x81, 0x4d, 0xe5, 0x3e, 0xc3, 0x53, 0x00, 0x7c, 0xd2, 0xd2, 0xf3, 0x4c, + 0x9b, 0xe8, 0xe5, 0x0b, 0xb9, 0xf9, 0xdb, 0xf6, 0x00, 0x79, 0x8a, 0xe0, 0xa8, 0xbd, 0xd7, 0x65, + 0xa3, 0x52, 0xb4, 0xf4, 0x35, 0x56, 0x79, 0x99, 0xbb, 0xe1, 0xf7, 0x60, 0xa8, 0xcc, 0x0c, 0x53, + 0xd1, 0xb5, 0x4c, 0x59, 0x61, 0x77, 0x27, 0x0e, 0xcc, 0xa0, 0xd9, 0x91, 0x54, 0x3a, 0x1e, 0x92, + 0xa7, 0xb8, 0x87, 0x68, 0xdb, 0x59, 0x51, 0x37, 0xb0, 0xad, 0xb0, 0xbb, 0xd2, 0x60, 0xb9, 0xfa, + 0x41, 0x7e, 0x42, 0x30, 0xe9, 0x23, 0x23, 0x2c, 0x5f, 0x2a, 0xa7, 0x1b, 0x70, 0xa0, 0x4b, 0x5c, + 0xb8, 0x1f, 0xf2, 0x04, 0xc1, 0x11, 0x7f, 0xf6, 0xcd, 0xa2, 0xae, 0x99, 0x0c, 0x2f, 0xc2, 0x40, + 0x9e, 0x55, 0x32, 0x86, 0x3d, 0x38, 0x81, 0x66, 0x7a, 0x67, 0x07, 0x53, 0x93, 0xa1, 0xbb, 0x89, + 0x95, 0x52, 0x7f, 0x5e, 0xb8, 0xc0, 0xa7, 0x60, 0x54, 0x63, 0xf7, 0xac, 0x4c, 0x0d, 0x8f, 0x1e, + 0xce, 0x63, 0xd8, 0x1e, 0xde, 0xf0, 0xb8, 0x4c, 0x01, 0x58, 0xba, 0x45, 0x0b, 0x4e, 0x20, 0x7a, + 0x79, 0x20, 0x06, 0xf8, 0x88, 0x1d, 0x09, 0xf2, 0x05, 0x82, 0xf1, 0xa0, 0x58, 0x04, 0xb8, 0x37, + 0x60, 0x30, 0xcb, 0x47, 0x33, 0x79, 0x56, 0x71, 0xe1, 0x4d, 0x37, 0x0e, 0x86, 0x04, 0x59, 0xcf, + 0x51, 0xb7, 0x20, 0xfe, 0x88, 0x60, 0x2a, 0x42, 0x02, 0x02, 0xe9, 0x16, 0x8c, 0x55, 0x91, 0x66, + 0x84, 0x7c, 0x5c, 0xc4, 0xff, 0x6b, 0x29, 0x7d, 0xd2, 0xe1, 0x6c, 0xd0, 0x7d, 0xb7, 0xf0, 0xff, + 0x1f, 0x0e, 0xaf, 0x32, 0x37, 0xf7, 0xae, 0x6c, 0x31, 0x1c, 0xd0, 0xa8, 0xca, 0x84, 0x68, 0xf9, + 0xdf, 0xe4, 0x34, 0x8c, 0xad, 0xb2, 0x2a, 0xcd, 0x46, 0xa6, 0xaf, 0x41, 0xac, 0xd6, 0xd4, 0x25, + 0xd1, 0xd4, 0xf9, 0x46, 0x69, 0xaf, 0xa0, 0x64, 0x9b, 0x38, 0xff, 0x04, 0xc1, 0x91, 0xcb, 0x06, + 0xa3, 0x16, 0x0b, 0x80, 0x8e, 0x3a, 0x6b, 0xd3, 0x30, 0xe8, 0xca, 0x38, 0xa3, 0xc8, 0x22, 0x48, + 0x03, 0x42, 0xaa, 0xd7, 0x64, 0x7c, 0x1e, 0xfa, 0xdd, 0x79, 0x1e, 0x9e, 0x66, 0x2a, 0x3f, 0x28, + 0x96, 0x92, 0xc7, 0x08, 0xc6, 0x1d, 0x24, 0x75, 0x51, 0x89, 0xc2, 0x42, 0x60, 0xb8, 0x46, 0x0b, + 0x1e, 0x9a, 0x41, 0x2f, 0xbd, 0xd7, 0x64, 0x7c, 0x09, 0xa0, 0x6a, 0x23, 0x10, 0x35, 0x13, 0xf6, + 0x80, 0xe7, 0x80, 0x3c, 0x42, 0x30, 0x15, 0x40, 0x15, 0x48, 0x40, 0x14, 0xb8, 0x4d, 0xc0, 0xf5, + 0x42, 0xe5, 0x08, 0x5b, 0xd6, 0xe9, 0xa1, 0xa0, 0x4e, 0xc9, 0xa7, 0x08, 0xc6, 0xb7, 0x8a, 0x72, + 0x58, 0x90, 0xfc, 0x44, 0x51, 0x9b, 0x44, 0xf1, 0x45, 0x18, 0x2c, 0x71, 0xc7, 0xfc, 0x1d, 0x14, + 0x38, 0x63, 0xee, 0x7a, 0xf7, 0xa9, 0x8b, 0x5f, 0xb5, 0x9f, 0xca, 0x75, 0x6a, 0xe6, 0x25, 0x70, + 0xcc, 0xed, 0xbf, 0xc9, 0x77, 0x08, 0xa6, 0x02, 0xb0, 0x02, 0x51, 0x0a, 0x8f, 0x06, 0x7a, 0xa1, + 0x68, 0xbc, 0x18, 0xe6, 0x8f, 0x11, 0x8c, 0x5c, 0xd1, 0xb8, 0xcf, 0x06, 0x07, 0x04, 0x4f, 0xc2, + 0x40, 0xb1, 0x40, 0x15, 0xcd, 0x62, 0xf7, 0x2c, 0xbe, 0xc3, 0x90, 0x54, 0x1d, 0xc0, 0xcb, 0x30, + 0x45, 0x65, 0x59, 0xb1, 0x4b, 0x10, 0x5a, 0xc8, 0xd0, 0x92, 0xb5, 0xcf, 0x34, 0x4b, 0xc9, 0x52, + 0x8b, 0xc9, 0x19, 0x99, 0x5a, 0x94, 0x0b, 0x6e, 0x48, 0x3a, 0x5e, 0x35, 0x5a, 0xaa, 0xb5, 0x59, + 0xa1, 0x16, 0x25, 0x0f, 0x11, 0x8c, 
0xac, 0xb0, 0xa6, 0x40, 0xa6, 0x01, 0xb2, 0x4a, 0x71, 0x9f, + 0x19, 0x35, 0x48, 0x6a, 0x46, 0xba, 0x02, 0x65, 0x17, 0x8e, 0x2e, 0x99, 0x15, 0x55, 0x65, 0x96, + 0xa1, 0x64, 0x37, 0x95, 0x5c, 0xa3, 0x5b, 0x06, 0xcf, 0x43, 0x9f, 0xac, 0xe4, 0x98, 0x69, 0x89, + 0x53, 0x75, 0x3c, 0x34, 0x8d, 0x2b, 0xdc, 0x44, 0x12, 0xa6, 0xe4, 0x06, 0x4c, 0x54, 0x77, 0x68, + 0x9b, 0x75, 0x6f, 0x90, 0x35, 0x49, 0xc0, 0xa8, 0xe7, 0x45, 0xbc, 0x10, 0xbe, 0x8c, 0xa1, 0x40, + 0xc6, 0xc8, 0x15, 0x18, 0xf5, 0xb2, 0x2e, 0x16, 0x74, 0x10, 0x6d, 0x72, 0x0e, 0xc6, 0x83, 0x91, + 0xaa, 0x6e, 0x6f, 0x2a, 0x39, 0x8d, 0x5a, 0x25, 0x83, 0xb9, 0xdb, 0x7b, 0x03, 0x64, 0x11, 0xfe, + 0x13, 0xc2, 0xbf, 0x25, 0xe4, 0x2a, 0x9c, 0x0c, 0x9c, 0xb1, 0x0d, 0x43, 0x51, 0xa9, 0xd1, 0xc2, + 0x8b, 0x80, 0x93, 0x70, 0xb4, 0xfe, 0xf8, 0x55, 0x6f, 0x4c, 0x1c, 0x3c, 0x5a, 0xd7, 0x64, 0xb2, + 0x00, 0xd3, 0x2b, 0xcc, 0xb4, 0x0c, 0xbd, 0xd2, 0xce, 0xd3, 0xb3, 0x00, 0xd3, 0x12, 0x33, 0x2d, + 0xdd, 0x60, 0xed, 0xac, 0xda, 0x85, 0x3e, 0x47, 0x27, 0x78, 0x02, 0xfa, 0xcc, 0x7d, 0x9a, 0x3a, + 0x7b, 0xce, 0xe1, 0xff, 0xe6, 0xbf, 0x24, 0xf1, 0x2d, 0x66, 0xe6, 0x2f, 0x2c, 0x38, 0xd9, 0x10, + 0x33, 0xf3, 0x17, 0x16, 0xc4, 0xcc, 0xd9, 0x64, 0xca, 0xd1, 0x87, 0x98, 0x39, 0x9b, 0x4c, 0x2d, + 0xf7, 0xbb, 0x12, 0x25, 0xe7, 0xe1, 0xd0, 0x75, 0x3d, 0xcb, 0x7b, 0x85, 0x75, 0x66, 0x51, 0xfb, + 0x40, 0xe0, 0x13, 0x30, 0xbc, 0x6f, 0xaa, 0x19, 0x5a, 0xa6, 0x4a, 0x81, 0xee, 0x15, 0x1c, 0x48, + 0xfd, 0xd2, 0xd0, 0xbe, 0xa9, 0x2e, 0xb9, 0x63, 0xa9, 0x5f, 0x26, 0xe1, 0xc8, 0x1a, 0xab, 0xac, + 0x53, 0x8d, 0xe6, 0x98, 0xca, 0x34, 0x6b, 0xd3, 0xe9, 0x51, 0xf0, 0x67, 0x08, 0x86, 0x6a, 0x0b, + 0x3d, 0x3c, 0x1b, 0xaa, 0xff, 0x90, 0x4e, 0x20, 0x76, 0xba, 0x05, 0x4b, 0x47, 0x12, 0x64, 0xe1, + 0xa3, 0xdf, 0xfe, 0x78, 0xdc, 0x13, 0xc7, 0xaf, 0xd8, 0x8d, 0xcc, 0x07, 0xce, 0xd3, 0x72, 0xa9, + 0x68, 0xe8, 0x77, 0x58, 0xd6, 0x32, 0x13, 0x73, 0x89, 0x82, 0x20, 0x65, 0x26, 0xe6, 0x1e, 0x24, + 0xbc, 0x82, 0xf1, 0x1b, 0x04, 0x23, 0xfe, 0x4a, 0x0f, 0xcf, 0x45, 0xee, 0x59, 0xd7, 0x3b, 0xc4, + 0xce, 0xb4, 0x64, 0x2b, 0x10, 0x2e, 0x71, 0x84, 0x17, 0xf1, 0x62, 0x73, 0x84, 0x1e, 0x40, 0x1b, + 0x6c, 0x4d, 0xf1, 0xf8, 0x2c, 0xd8, 0xc5, 0x78, 0x65, 0x59, 0xb2, 0x39, 0x92, 0x40, 0x93, 0x10, + 0x4b, 0xb5, 0xb3, 0x44, 0x70, 0xd8, 0xe2, 0x1c, 0xde, 0xc2, 0xeb, 0x6d, 0x71, 0xa8, 0xa1, 0xe0, + 0x23, 0xe4, 0xa1, 0x7f, 0x88, 0x00, 0xaa, 0xe5, 0x20, 0x3e, 0x15, 0x8a, 0xac, 0xae, 0x5e, 0x8c, + 0x35, 0x2c, 0x98, 0x02, 0x8a, 0xb0, 0x8f, 0x4f, 0x0b, 0xd1, 0xc6, 0x9f, 0x23, 0x18, 0xaa, 0xad, + 0x22, 0x23, 0xd4, 0x1a, 0x52, 0x93, 0xc6, 0x9a, 0x14, 0x11, 0x01, 0x01, 0xb4, 0x06, 0xc8, 0x1f, + 0x3a, 0xfc, 0x33, 0xf2, 0x97, 0xc3, 0xee, 0x03, 0x9f, 0x68, 0x0a, 0xd2, 0x7f, 0xb9, 0xc4, 0x5a, + 0x2b, 0x25, 0x02, 0xf9, 0xee, 0x00, 0x72, 0x7d, 0xb2, 0x6d, 0x1a, 0x3f, 0x38, 0x41, 0xf6, 0x0a, + 0xef, 0xe8, 0x20, 0x07, 0x6b, 0xf3, 0x88, 0x20, 0x7b, 0x66, 0x24, 0xc3, 0x11, 0xdf, 0xc4, 0xef, + 0x74, 0x15, 0x71, 0xa2, 0xe8, 0x41, 0x7d, 0x82, 0x60, 0xd8, 0xd7, 0x08, 0xe0, 0xd3, 0x11, 0xb1, + 0xac, 0x6f, 0x16, 0x9a, 0x28, 0xf6, 0x75, 0x8e, 0xfd, 0x02, 0x69, 0xeb, 0x0e, 0x4b, 0x7b, 0x6d, + 0x04, 0xfe, 0x16, 0xc1, 0x68, 0xa0, 0x06, 0xc7, 0x67, 0x1a, 0x80, 0x6b, 0x5b, 0xc1, 0xeb, 0x1c, + 0xe0, 0x2a, 0xe9, 0xfc, 0x0a, 0x4b, 0xd7, 0xd4, 0xde, 0xf8, 0x79, 0x7d, 0x27, 0xe3, 0x0a, 0x3a, + 0xd5, 0x0a, 0xec, 0xce, 0x34, 0xad, 0x70, 0x12, 0x59, 0xd2, 0xdd, 0x3b, 0x2c, 0x1d, 0x52, 0xb6, + 0xe3, 0xef, 0x11, 0x8c, 0x06, 0x4a, 0x91, 0x88, 0x84, 0x84, 0xf7, 0x2a, 0x4d, 0x13, 0xf2, 0x2e, + 0xe7, 0x22, 0xa5, 0x56, 0x38, 0x97, 0x2a, 0x82, 0x78, 0x27, 
0xb7, 0x8b, 0x2f, 0x37, 0x7f, 0xd6, + 0x37, 0x50, 0x8d, 0x73, 0xd3, 0xb0, 0xad, 0x69, 0x35, 0x37, 0x1f, 0x72, 0x3e, 0xe5, 0xd4, 0x6e, + 0x80, 0x8f, 0x1b, 0xd1, 0x78, 0xb7, 0x0e, 0x74, 0x68, 0xba, 0xbe, 0x42, 0x70, 0x50, 0xd4, 0xbc, + 0xf8, 0x44, 0x28, 0x60, 0x7f, 0x1f, 0x14, 0x3b, 0xd9, 0xd8, 0x48, 0x3c, 0x9a, 0xee, 0xa9, 0x59, + 0xee, 0xf4, 0x4a, 0x9a, 0x7b, 0x90, 0x66, 0x8e, 0xcf, 0x34, 0x9a, 0xc3, 0x5f, 0x22, 0x38, 0x28, + 0x0a, 0xe2, 0x08, 0x94, 0xfe, 0x76, 0x21, 0x02, 0x65, 0xa0, 0xa6, 0x26, 0xd7, 0x39, 0xca, 0xab, + 0x64, 0xa9, 0xe3, 0xd7, 0x29, 0x2d, 0x33, 0x0f, 0xe4, 0xaf, 0x08, 0x46, 0xfc, 0x75, 0x7f, 0x44, + 0x61, 0x15, 0xda, 0x46, 0x45, 0x14, 0x56, 0xe1, 0x8d, 0x04, 0xc9, 0x71, 0xe4, 0x94, 0xec, 0x74, + 0xf5, 0xca, 0x4f, 0x53, 0xdf, 0x6e, 0x36, 0xa9, 0xe7, 0x08, 0x0e, 0xd7, 0x35, 0x25, 0xf8, 0xd5, + 0x26, 0x58, 0x03, 0xd9, 0x88, 0xb7, 0x6a, 0x2e, 0xd8, 0xdd, 0xe1, 0xec, 0x64, 0x92, 0x79, 0x59, + 0xec, 0x56, 0xaa, 0x59, 0x7b, 0x56, 0xff, 0xf3, 0x84, 0xbf, 0x75, 0xc2, 0x8b, 0xad, 0x9c, 0xfd, + 0xd0, 0x76, 0xab, 0xe9, 0x5d, 0x76, 0x93, 0x13, 0xdd, 0x24, 0x37, 0x3a, 0x17, 0xa0, 0xf3, 0xab, + 0x85, 0x7f, 0x7b, 0x9b, 0xd7, 0xef, 0x08, 0x8e, 0x45, 0xf4, 0x68, 0x78, 0x3e, 0xe2, 0x74, 0x34, + 0xea, 0xe8, 0x5a, 0xbd, 0xce, 0x76, 0x39, 0xa5, 0x5b, 0x64, 0xab, 0xbb, 0xb9, 0x93, 0x1d, 0x70, + 0x2e, 0xb3, 0x88, 0x3e, 0x32, 0x82, 0x59, 0xe3, 0xae, 0xf3, 0x1f, 0x66, 0x66, 0x38, 0xe0, 0xd2, + 0x68, 0x6e, 0xf9, 0x11, 0x82, 0x63, 0x59, 0x5d, 0x0d, 0x83, 0xb3, 0xdc, 0xbf, 0xa6, 0x9a, 0x1b, + 0x86, 0x6e, 0xe9, 0x1b, 0xe8, 0xd6, 0x39, 0x61, 0x90, 0xd3, 0x0b, 0x54, 0xcb, 0xc5, 0x75, 0x23, + 0x97, 0xc8, 0x31, 0x8d, 0xff, 0xa6, 0x95, 0x70, 0xa6, 0x68, 0x51, 0x31, 0x7d, 0xff, 0xe8, 0xba, + 0x98, 0x57, 0xcd, 0xbf, 0x10, 0xfa, 0xba, 0x67, 0x6c, 0xd5, 0x59, 0x7b, 0x99, 0x3b, 0x5f, 0x53, + 0xcd, 0xf8, 0x76, 0xf2, 0xa9, 0x3b, 0xba, 0xc3, 0x47, 0x77, 0xd6, 0x54, 0x73, 0x67, 0x3b, 0xb9, + 0xd7, 0xc7, 0x3d, 0xce, 0xff, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xac, 0x72, 0x95, 0x44, 0x8f, 0x1b, + 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go new file mode 100644 index 000000000..dfc87966b --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go @@ -0,0 +1,411 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/iam/v1/iam_policy.proto + +package iam // import "google.golang.org/genproto/googleapis/iam/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Request message for `SetIamPolicy` method. +type SetIamPolicyRequest struct { + // REQUIRED: The resource for which the policy is being specified. + // `resource` is usually specified as a path. For example, a Project + // resource is specified as `projects/{project}`. 
+ Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // REQUIRED: The complete policy to be applied to the `resource`. The size of + // the policy is limited to a few 10s of KB. An empty policy is a + // valid policy but certain Cloud Platform services (such as Projects) + // might reject them. + Policy *Policy `protobuf:"bytes,2,opt,name=policy,proto3" json:"policy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetIamPolicyRequest) Reset() { *m = SetIamPolicyRequest{} } +func (m *SetIamPolicyRequest) String() string { return proto.CompactTextString(m) } +func (*SetIamPolicyRequest) ProtoMessage() {} +func (*SetIamPolicyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_iam_policy_58547b5cf2e9d67a, []int{0} +} +func (m *SetIamPolicyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetIamPolicyRequest.Unmarshal(m, b) +} +func (m *SetIamPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetIamPolicyRequest.Marshal(b, m, deterministic) +} +func (dst *SetIamPolicyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetIamPolicyRequest.Merge(dst, src) +} +func (m *SetIamPolicyRequest) XXX_Size() int { + return xxx_messageInfo_SetIamPolicyRequest.Size(m) +} +func (m *SetIamPolicyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetIamPolicyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetIamPolicyRequest proto.InternalMessageInfo + +func (m *SetIamPolicyRequest) GetResource() string { + if m != nil { + return m.Resource + } + return "" +} + +func (m *SetIamPolicyRequest) GetPolicy() *Policy { + if m != nil { + return m.Policy + } + return nil +} + +// Request message for `GetIamPolicy` method. +type GetIamPolicyRequest struct { + // REQUIRED: The resource for which the policy is being requested. + // `resource` is usually specified as a path. For example, a Project + // resource is specified as `projects/{project}`. + Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetIamPolicyRequest) Reset() { *m = GetIamPolicyRequest{} } +func (m *GetIamPolicyRequest) String() string { return proto.CompactTextString(m) } +func (*GetIamPolicyRequest) ProtoMessage() {} +func (*GetIamPolicyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_iam_policy_58547b5cf2e9d67a, []int{1} +} +func (m *GetIamPolicyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetIamPolicyRequest.Unmarshal(m, b) +} +func (m *GetIamPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetIamPolicyRequest.Marshal(b, m, deterministic) +} +func (dst *GetIamPolicyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetIamPolicyRequest.Merge(dst, src) +} +func (m *GetIamPolicyRequest) XXX_Size() int { + return xxx_messageInfo_GetIamPolicyRequest.Size(m) +} +func (m *GetIamPolicyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetIamPolicyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetIamPolicyRequest proto.InternalMessageInfo + +func (m *GetIamPolicyRequest) GetResource() string { + if m != nil { + return m.Resource + } + return "" +} + +// Request message for `TestIamPermissions` method. 
+type TestIamPermissionsRequest struct { + // REQUIRED: The resource for which the policy detail is being requested. + // `resource` is usually specified as a path. For example, a Project + // resource is specified as `projects/{project}`. + Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // The set of permissions to check for the `resource`. Permissions with + // wildcards (such as '*' or 'storage.*') are not allowed. For more + // information see + // [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions). + Permissions []string `protobuf:"bytes,2,rep,name=permissions,proto3" json:"permissions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TestIamPermissionsRequest) Reset() { *m = TestIamPermissionsRequest{} } +func (m *TestIamPermissionsRequest) String() string { return proto.CompactTextString(m) } +func (*TestIamPermissionsRequest) ProtoMessage() {} +func (*TestIamPermissionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_iam_policy_58547b5cf2e9d67a, []int{2} +} +func (m *TestIamPermissionsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TestIamPermissionsRequest.Unmarshal(m, b) +} +func (m *TestIamPermissionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TestIamPermissionsRequest.Marshal(b, m, deterministic) +} +func (dst *TestIamPermissionsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TestIamPermissionsRequest.Merge(dst, src) +} +func (m *TestIamPermissionsRequest) XXX_Size() int { + return xxx_messageInfo_TestIamPermissionsRequest.Size(m) +} +func (m *TestIamPermissionsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TestIamPermissionsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TestIamPermissionsRequest proto.InternalMessageInfo + +func (m *TestIamPermissionsRequest) GetResource() string { + if m != nil { + return m.Resource + } + return "" +} + +func (m *TestIamPermissionsRequest) GetPermissions() []string { + if m != nil { + return m.Permissions + } + return nil +} + +// Response message for `TestIamPermissions` method. +type TestIamPermissionsResponse struct { + // A subset of `TestPermissionsRequest.permissions` that the caller is + // allowed. 
+ Permissions []string `protobuf:"bytes,1,rep,name=permissions,proto3" json:"permissions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TestIamPermissionsResponse) Reset() { *m = TestIamPermissionsResponse{} } +func (m *TestIamPermissionsResponse) String() string { return proto.CompactTextString(m) } +func (*TestIamPermissionsResponse) ProtoMessage() {} +func (*TestIamPermissionsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_iam_policy_58547b5cf2e9d67a, []int{3} +} +func (m *TestIamPermissionsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TestIamPermissionsResponse.Unmarshal(m, b) +} +func (m *TestIamPermissionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TestIamPermissionsResponse.Marshal(b, m, deterministic) +} +func (dst *TestIamPermissionsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TestIamPermissionsResponse.Merge(dst, src) +} +func (m *TestIamPermissionsResponse) XXX_Size() int { + return xxx_messageInfo_TestIamPermissionsResponse.Size(m) +} +func (m *TestIamPermissionsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TestIamPermissionsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TestIamPermissionsResponse proto.InternalMessageInfo + +func (m *TestIamPermissionsResponse) GetPermissions() []string { + if m != nil { + return m.Permissions + } + return nil +} + +func init() { + proto.RegisterType((*SetIamPolicyRequest)(nil), "google.iam.v1.SetIamPolicyRequest") + proto.RegisterType((*GetIamPolicyRequest)(nil), "google.iam.v1.GetIamPolicyRequest") + proto.RegisterType((*TestIamPermissionsRequest)(nil), "google.iam.v1.TestIamPermissionsRequest") + proto.RegisterType((*TestIamPermissionsResponse)(nil), "google.iam.v1.TestIamPermissionsResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// IAMPolicyClient is the client API for IAMPolicy service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type IAMPolicyClient interface { + // Sets the access control policy on the specified resource. Replaces any + // existing policy. + SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) + // Gets the access control policy for a resource. + // Returns an empty policy if the resource exists and does not have a policy + // set. + GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) + // Returns permissions that a caller has on the specified resource. + // If the resource does not exist, this will return an empty set of + // permissions, not a NOT_FOUND error. 
+ TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error) +} + +type iAMPolicyClient struct { + cc *grpc.ClientConn +} + +func NewIAMPolicyClient(cc *grpc.ClientConn) IAMPolicyClient { + return &iAMPolicyClient{cc} +} + +func (c *iAMPolicyClient) SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) { + out := new(Policy) + err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/SetIamPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMPolicyClient) GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) { + out := new(Policy) + err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/GetIamPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMPolicyClient) TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error) { + out := new(TestIamPermissionsResponse) + err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/TestIamPermissions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// IAMPolicyServer is the server API for IAMPolicy service. +type IAMPolicyServer interface { + // Sets the access control policy on the specified resource. Replaces any + // existing policy. + SetIamPolicy(context.Context, *SetIamPolicyRequest) (*Policy, error) + // Gets the access control policy for a resource. + // Returns an empty policy if the resource exists and does not have a policy + // set. + GetIamPolicy(context.Context, *GetIamPolicyRequest) (*Policy, error) + // Returns permissions that a caller has on the specified resource. + // If the resource does not exist, this will return an empty set of + // permissions, not a NOT_FOUND error. 
+ TestIamPermissions(context.Context, *TestIamPermissionsRequest) (*TestIamPermissionsResponse, error) +} + +func RegisterIAMPolicyServer(s *grpc.Server, srv IAMPolicyServer) { + s.RegisterService(&_IAMPolicy_serviceDesc, srv) +} + +func _IAMPolicy_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMPolicyServer).SetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.v1.IAMPolicy/SetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMPolicyServer).SetIamPolicy(ctx, req.(*SetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAMPolicy_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMPolicyServer).GetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.v1.IAMPolicy/GetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMPolicyServer).GetIamPolicy(ctx, req.(*GetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAMPolicy_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TestIamPermissionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMPolicyServer).TestIamPermissions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.v1.IAMPolicy/TestIamPermissions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMPolicyServer).TestIamPermissions(ctx, req.(*TestIamPermissionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _IAMPolicy_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.iam.v1.IAMPolicy", + HandlerType: (*IAMPolicyServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SetIamPolicy", + Handler: _IAMPolicy_SetIamPolicy_Handler, + }, + { + MethodName: "GetIamPolicy", + Handler: _IAMPolicy_GetIamPolicy_Handler, + }, + { + MethodName: "TestIamPermissions", + Handler: _IAMPolicy_TestIamPermissions_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/iam/v1/iam_policy.proto", +} + +func init() { + proto.RegisterFile("google/iam/v1/iam_policy.proto", fileDescriptor_iam_policy_58547b5cf2e9d67a) +} + +var fileDescriptor_iam_policy_58547b5cf2e9d67a = []byte{ + // 411 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0xcf, 0x4c, 0xcc, 0xd5, 0x2f, 0x33, 0x04, 0x51, 0xf1, 0x05, 0xf9, 0x39, 0x99, + 0xc9, 0x95, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0xbc, 0x10, 0x79, 0xbd, 0xcc, 0xc4, 0x5c, + 0xbd, 0x32, 0x43, 0x29, 0x19, 0xa8, 0xf2, 0xc4, 0x82, 0x4c, 0xfd, 0xc4, 0xbc, 0xbc, 0xfc, 0x92, + 0xc4, 0x92, 0xcc, 0xfc, 0xbc, 0x62, 0x88, 0x62, 0x29, 0x29, 0x54, 0xc3, 0x90, 0x0d, 0x52, 0x4a, + 0xe0, 0x12, 0x0e, 0x4e, 0x2d, 0xf1, 0x4c, 0xcc, 0x0d, 0x00, 0x8b, 0x06, 0xa5, 0x16, 
0x96, 0xa6, + 0x16, 0x97, 0x08, 0x49, 0x71, 0x71, 0x14, 0xa5, 0x16, 0xe7, 0x97, 0x16, 0x25, 0xa7, 0x4a, 0x30, + 0x2a, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xf9, 0x42, 0xba, 0x5c, 0x6c, 0x10, 0x23, 0x24, 0x98, 0x14, + 0x18, 0x35, 0xb8, 0x8d, 0x44, 0xf5, 0x50, 0x1c, 0xa3, 0x07, 0x35, 0x09, 0xaa, 0x48, 0xc9, 0x90, + 0x4b, 0xd8, 0x9d, 0x34, 0x1b, 0x94, 0x22, 0xb9, 0x24, 0x43, 0x52, 0x8b, 0xc1, 0x7a, 0x52, 0x8b, + 0x72, 0x33, 0x8b, 0x8b, 0x41, 0x9e, 0x21, 0xc6, 0x69, 0x0a, 0x5c, 0xdc, 0x05, 0x08, 0x1d, 0x12, + 0x4c, 0x0a, 0xcc, 0x1a, 0x9c, 0x41, 0xc8, 0x42, 0x4a, 0x76, 0x5c, 0x52, 0xd8, 0x8c, 0x2e, 0x2e, + 0xc8, 0xcf, 0x2b, 0xc6, 0xd0, 0xcf, 0x88, 0xa1, 0xdf, 0x68, 0x0a, 0x33, 0x17, 0xa7, 0xa7, 0xa3, + 0x2f, 0xc4, 0x2f, 0x42, 0x25, 0x5c, 0x3c, 0xc8, 0xa1, 0x27, 0xa4, 0x84, 0x16, 0x14, 0x58, 0x82, + 0x56, 0x0a, 0x7b, 0x70, 0x29, 0x69, 0x36, 0x5d, 0x7e, 0x32, 0x99, 0x49, 0x59, 0x49, 0x0e, 0x14, + 0x45, 0xd5, 0x30, 0x1f, 0xd9, 0x6a, 0x69, 0xd5, 0x5a, 0x15, 0x23, 0x99, 0x62, 0xc5, 0xa8, 0x05, + 0xb2, 0xd5, 0x1d, 0x9f, 0xad, 0xee, 0x54, 0xb1, 0x35, 0x1d, 0xcd, 0xd6, 0x59, 0x8c, 0x5c, 0x42, + 0x98, 0x41, 0x27, 0xa4, 0x81, 0x66, 0x30, 0xce, 0x88, 0x93, 0xd2, 0x24, 0x42, 0x25, 0x24, 0x1e, + 0x94, 0xf4, 0xc1, 0xce, 0xd2, 0x54, 0x52, 0xc1, 0x74, 0x56, 0x09, 0x86, 0x2e, 0x2b, 0x46, 0x2d, + 0xa7, 0x36, 0x46, 0x2e, 0xc1, 0xe4, 0xfc, 0x5c, 0x54, 0x1b, 0x9c, 0xf8, 0xe0, 0x1e, 0x08, 0x00, + 0x25, 0xf6, 0x00, 0xc6, 0x28, 0x03, 0xa8, 0x82, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, + 0xa2, 0x74, 0xfd, 0xf4, 0xd4, 0x3c, 0x70, 0x56, 0xd0, 0x87, 0x48, 0x25, 0x16, 0x64, 0x16, 0x43, + 0x73, 0x8a, 0x75, 0x66, 0x62, 0xee, 0x0f, 0x46, 0xc6, 0x55, 0x4c, 0xc2, 0xee, 0x10, 0x5d, 0xce, + 0x39, 0xf9, 0xa5, 0x29, 0x7a, 0x9e, 0x89, 0xb9, 0x7a, 0x61, 0x86, 0xa7, 0x60, 0xa2, 0x31, 0x60, + 0xd1, 0x18, 0xcf, 0xc4, 0xdc, 0x98, 0x30, 0xc3, 0x24, 0x36, 0xb0, 0x59, 0xc6, 0x80, 0x00, 0x00, + 0x00, 0xff, 0xff, 0xea, 0x62, 0x8f, 0x22, 0xc1, 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go new file mode 100644 index 000000000..99dd75f26 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go @@ -0,0 +1,366 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/iam/v1/policy.proto + +package iam // import "google.golang.org/genproto/googleapis/iam/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The type of action performed on a Binding in a policy. +type BindingDelta_Action int32 + +const ( + // Unspecified. + BindingDelta_ACTION_UNSPECIFIED BindingDelta_Action = 0 + // Addition of a Binding. + BindingDelta_ADD BindingDelta_Action = 1 + // Removal of a Binding. 
+ BindingDelta_REMOVE BindingDelta_Action = 2 +) + +var BindingDelta_Action_name = map[int32]string{ + 0: "ACTION_UNSPECIFIED", + 1: "ADD", + 2: "REMOVE", +} +var BindingDelta_Action_value = map[string]int32{ + "ACTION_UNSPECIFIED": 0, + "ADD": 1, + "REMOVE": 2, +} + +func (x BindingDelta_Action) String() string { + return proto.EnumName(BindingDelta_Action_name, int32(x)) +} +func (BindingDelta_Action) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_policy_6ba2a3dcbcdd909c, []int{3, 0} +} + +// Defines an Identity and Access Management (IAM) policy. It is used to +// specify access control policies for Cloud Platform resources. +// +// +// A `Policy` consists of a list of `bindings`. A `Binding` binds a list of +// `members` to a `role`, where the members can be user accounts, Google groups, +// Google domains, and service accounts. A `role` is a named list of permissions +// defined by IAM. +// +// **Example** +// +// { +// "bindings": [ +// { +// "role": "roles/owner", +// "members": [ +// "user:mike@example.com", +// "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-other-app@appspot.gserviceaccount.com", +// ] +// }, +// { +// "role": "roles/viewer", +// "members": ["user:sean@example.com"] +// } +// ] +// } +// +// For a description of IAM and its features, see the +// [IAM developer's guide](https://cloud.google.com/iam). +type Policy struct { + // Version of the `Policy`. The default version is 0. + Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + // Associates a list of `members` to a `role`. + // Multiple `bindings` must not be specified for the same `role`. + // `bindings` with no members will result in an error. + Bindings []*Binding `protobuf:"bytes,4,rep,name=bindings,proto3" json:"bindings,omitempty"` + // `etag` is used for optimistic concurrency control as a way to help + // prevent simultaneous updates of a policy from overwriting each other. + // It is strongly suggested that systems make use of the `etag` in the + // read-modify-write cycle to perform policy updates in order to avoid race + // conditions: An `etag` is returned in the response to `getIamPolicy`, and + // systems are expected to put that etag in the request to `setIamPolicy` to + // ensure that their change will be applied to the same version of the policy. + // + // If no `etag` is provided in the call to `setIamPolicy`, then the existing + // policy is overwritten blindly. 
+ Etag []byte `protobuf:"bytes,3,opt,name=etag,proto3" json:"etag,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Policy) Reset() { *m = Policy{} } +func (m *Policy) String() string { return proto.CompactTextString(m) } +func (*Policy) ProtoMessage() {} +func (*Policy) Descriptor() ([]byte, []int) { + return fileDescriptor_policy_6ba2a3dcbcdd909c, []int{0} +} +func (m *Policy) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Policy.Unmarshal(m, b) +} +func (m *Policy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Policy.Marshal(b, m, deterministic) +} +func (dst *Policy) XXX_Merge(src proto.Message) { + xxx_messageInfo_Policy.Merge(dst, src) +} +func (m *Policy) XXX_Size() int { + return xxx_messageInfo_Policy.Size(m) +} +func (m *Policy) XXX_DiscardUnknown() { + xxx_messageInfo_Policy.DiscardUnknown(m) +} + +var xxx_messageInfo_Policy proto.InternalMessageInfo + +func (m *Policy) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *Policy) GetBindings() []*Binding { + if m != nil { + return m.Bindings + } + return nil +} + +func (m *Policy) GetEtag() []byte { + if m != nil { + return m.Etag + } + return nil +} + +// Associates `members` with a `role`. +type Binding struct { + // Role that is assigned to `members`. + // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + // Required + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` + // Specifies the identities requesting access for a Cloud Platform resource. + // `members` can have the following values: + // + // * `allUsers`: A special identifier that represents anyone who is + // on the internet; with or without a Google account. + // + // * `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. + // + // * `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@gmail.com` or `joe@example.com`. + // + // + // * `serviceAccount:{emailid}`: An email address that represents a service + // account. For example, `my-other-app@appspot.gserviceaccount.com`. + // + // * `group:{emailid}`: An email address that represents a Google group. + // For example, `admins@example.com`. + // + // * `domain:{domain}`: A Google Apps domain name that represents all the + // users of that domain. For example, `google.com` or `example.com`. 
+ // + // + Members []string `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Binding) Reset() { *m = Binding{} } +func (m *Binding) String() string { return proto.CompactTextString(m) } +func (*Binding) ProtoMessage() {} +func (*Binding) Descriptor() ([]byte, []int) { + return fileDescriptor_policy_6ba2a3dcbcdd909c, []int{1} +} +func (m *Binding) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Binding.Unmarshal(m, b) +} +func (m *Binding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Binding.Marshal(b, m, deterministic) +} +func (dst *Binding) XXX_Merge(src proto.Message) { + xxx_messageInfo_Binding.Merge(dst, src) +} +func (m *Binding) XXX_Size() int { + return xxx_messageInfo_Binding.Size(m) +} +func (m *Binding) XXX_DiscardUnknown() { + xxx_messageInfo_Binding.DiscardUnknown(m) +} + +var xxx_messageInfo_Binding proto.InternalMessageInfo + +func (m *Binding) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +func (m *Binding) GetMembers() []string { + if m != nil { + return m.Members + } + return nil +} + +// The difference delta between two policies. +type PolicyDelta struct { + // The delta for Bindings between two policies. + BindingDeltas []*BindingDelta `protobuf:"bytes,1,rep,name=binding_deltas,json=bindingDeltas,proto3" json:"binding_deltas,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PolicyDelta) Reset() { *m = PolicyDelta{} } +func (m *PolicyDelta) String() string { return proto.CompactTextString(m) } +func (*PolicyDelta) ProtoMessage() {} +func (*PolicyDelta) Descriptor() ([]byte, []int) { + return fileDescriptor_policy_6ba2a3dcbcdd909c, []int{2} +} +func (m *PolicyDelta) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PolicyDelta.Unmarshal(m, b) +} +func (m *PolicyDelta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PolicyDelta.Marshal(b, m, deterministic) +} +func (dst *PolicyDelta) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyDelta.Merge(dst, src) +} +func (m *PolicyDelta) XXX_Size() int { + return xxx_messageInfo_PolicyDelta.Size(m) +} +func (m *PolicyDelta) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyDelta.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyDelta proto.InternalMessageInfo + +func (m *PolicyDelta) GetBindingDeltas() []*BindingDelta { + if m != nil { + return m.BindingDeltas + } + return nil +} + +// One delta entry for Binding. Each individual change (only one member in each +// entry) to a binding will be a separate entry. +type BindingDelta struct { + // The action that was performed on a Binding. + // Required + Action BindingDelta_Action `protobuf:"varint,1,opt,name=action,proto3,enum=google.iam.v1.BindingDelta_Action" json:"action,omitempty"` + // Role that is assigned to `members`. + // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + // Required + Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` + // A single identity requesting access for a Cloud Platform resource. + // Follows the same format of Binding.members. 
+ // Required + Member string `protobuf:"bytes,3,opt,name=member,proto3" json:"member,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BindingDelta) Reset() { *m = BindingDelta{} } +func (m *BindingDelta) String() string { return proto.CompactTextString(m) } +func (*BindingDelta) ProtoMessage() {} +func (*BindingDelta) Descriptor() ([]byte, []int) { + return fileDescriptor_policy_6ba2a3dcbcdd909c, []int{3} +} +func (m *BindingDelta) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BindingDelta.Unmarshal(m, b) +} +func (m *BindingDelta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BindingDelta.Marshal(b, m, deterministic) +} +func (dst *BindingDelta) XXX_Merge(src proto.Message) { + xxx_messageInfo_BindingDelta.Merge(dst, src) +} +func (m *BindingDelta) XXX_Size() int { + return xxx_messageInfo_BindingDelta.Size(m) +} +func (m *BindingDelta) XXX_DiscardUnknown() { + xxx_messageInfo_BindingDelta.DiscardUnknown(m) +} + +var xxx_messageInfo_BindingDelta proto.InternalMessageInfo + +func (m *BindingDelta) GetAction() BindingDelta_Action { + if m != nil { + return m.Action + } + return BindingDelta_ACTION_UNSPECIFIED +} + +func (m *BindingDelta) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +func (m *BindingDelta) GetMember() string { + if m != nil { + return m.Member + } + return "" +} + +func init() { + proto.RegisterType((*Policy)(nil), "google.iam.v1.Policy") + proto.RegisterType((*Binding)(nil), "google.iam.v1.Binding") + proto.RegisterType((*PolicyDelta)(nil), "google.iam.v1.PolicyDelta") + proto.RegisterType((*BindingDelta)(nil), "google.iam.v1.BindingDelta") + proto.RegisterEnum("google.iam.v1.BindingDelta_Action", BindingDelta_Action_name, BindingDelta_Action_value) +} + +func init() { proto.RegisterFile("google/iam/v1/policy.proto", fileDescriptor_policy_6ba2a3dcbcdd909c) } + +var fileDescriptor_policy_6ba2a3dcbcdd909c = []byte{ + // 403 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x52, 0x4d, 0xab, 0x13, 0x31, + 0x14, 0x35, 0xed, 0x73, 0x6a, 0xef, 0xfb, 0xa0, 0x46, 0x28, 0xc3, 0xd3, 0x45, 0x99, 0x55, 0x57, + 0x19, 0x5b, 0x11, 0x41, 0x57, 0xfd, 0x18, 0x65, 0x16, 0xbe, 0x37, 0x46, 0xed, 0x42, 0x0a, 0x8f, + 0x4c, 0x1b, 0x42, 0x64, 0x92, 0x0c, 0x33, 0x63, 0xc1, 0xb5, 0xff, 0x46, 0xf0, 0x8f, 0xf8, 0x8b, + 0x5c, 0xca, 0x24, 0x99, 0x47, 0x0b, 0xe2, 0x2e, 0xe7, 0x9e, 0x73, 0x72, 0xcf, 0xcd, 0x0d, 0x5c, + 0x0b, 0x63, 0x44, 0xc1, 0x63, 0xc9, 0x54, 0x7c, 0x98, 0xc5, 0xa5, 0x29, 0xe4, 0xee, 0x3b, 0x29, + 0x2b, 0xd3, 0x18, 0x7c, 0xe9, 0x38, 0x22, 0x99, 0x22, 0x87, 0xd9, 0xf5, 0x33, 0x2f, 0x65, 0xa5, + 0x8c, 0x99, 0xd6, 0xa6, 0x61, 0x8d, 0x34, 0xba, 0x76, 0xe2, 0xe8, 0x2b, 0x04, 0x99, 0x35, 0xe3, + 0x10, 0x06, 0x07, 0x5e, 0xd5, 0xd2, 0xe8, 0x10, 0x4d, 0xd0, 0xf4, 0x21, 0xed, 0x20, 0x9e, 0xc3, + 0xa3, 0x5c, 0xea, 0xbd, 0xd4, 0xa2, 0x0e, 0xcf, 0x26, 0xfd, 0xe9, 0xf9, 0x7c, 0x4c, 0x4e, 0x7a, + 0x90, 0xa5, 0xa3, 0xe9, 0xbd, 0x0e, 0x63, 0x38, 0xe3, 0x0d, 0x13, 0x61, 0x7f, 0x82, 0xa6, 0x17, + 0xd4, 0x9e, 0xa3, 0x57, 0x30, 0xf0, 0xc2, 0x96, 0xae, 0x4c, 0xc1, 0x6d, 0xa7, 0x21, 0xb5, 0xe7, + 0x36, 0x80, 0xe2, 0x2a, 0xe7, 0x55, 0x1d, 0xf6, 0x26, 0xfd, 0xe9, 0x90, 0x76, 0x30, 0xfa, 0x00, + 0xe7, 0x2e, 0xe4, 0x9a, 0x17, 0x0d, 0xc3, 0x4b, 0xb8, 0xf2, 0x7d, 0xee, 0xf6, 0x6d, 0xa1, 0x0e, + 0x91, 0x4d, 0xf5, 0xf4, 0xdf, 0xa9, 0xac, 0x89, 0x5e, 0xe6, 0x47, 0xa8, 0x8e, 0x7e, 0x21, 0xb8, + 
0x38, 0xe6, 0xf1, 0x6b, 0x08, 0xd8, 0xae, 0xe9, 0xa6, 0xbf, 0x9a, 0x47, 0xff, 0xb9, 0x8c, 0x2c, + 0xac, 0x92, 0x7a, 0xc7, 0xfd, 0x34, 0xbd, 0xa3, 0x69, 0xc6, 0x10, 0xb8, 0xf8, 0xf6, 0x09, 0x86, + 0xd4, 0xa3, 0xe8, 0x25, 0x04, 0xce, 0x8d, 0xc7, 0x80, 0x17, 0xab, 0x4f, 0xe9, 0xed, 0xcd, 0xdd, + 0xe7, 0x9b, 0x8f, 0x59, 0xb2, 0x4a, 0xdf, 0xa6, 0xc9, 0x7a, 0xf4, 0x00, 0x0f, 0xa0, 0xbf, 0x58, + 0xaf, 0x47, 0x08, 0x03, 0x04, 0x34, 0x79, 0x7f, 0xbb, 0x49, 0x46, 0xbd, 0xe5, 0x0f, 0x04, 0x8f, + 0x77, 0x46, 0x9d, 0x86, 0x5a, 0xfa, 0x67, 0xc9, 0xda, 0x55, 0x66, 0xe8, 0xcb, 0x73, 0xcf, 0x0a, + 0x53, 0x30, 0x2d, 0x88, 0xa9, 0x44, 0x2c, 0xb8, 0xb6, 0x8b, 0x8e, 0x1d, 0xc5, 0x4a, 0x59, 0xfb, + 0x4f, 0xf3, 0x46, 0x32, 0xf5, 0x07, 0xa1, 0x9f, 0xbd, 0x27, 0xef, 0x9c, 0x6b, 0x55, 0x98, 0x6f, + 0x7b, 0x92, 0x32, 0x45, 0x36, 0xb3, 0xdf, 0x5d, 0x75, 0x6b, 0xab, 0xdb, 0x94, 0xa9, 0xed, 0x66, + 0x96, 0x07, 0xf6, 0xae, 0x17, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x18, 0xca, 0xaa, 0x7f, + 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index 8867ae781..7bfe37a3d 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,21 +1,12 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/rpc/status.proto -/* -Package status is a generated protocol buffer package. - -It is generated from these files: - google/rpc/status.proto - -It has these top-level messages: - Status -*/ -package status +package status // import "google.golang.org/genproto/googleapis/rpc/status" import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import google_protobuf "github.com/golang/protobuf/ptypes/any" +import any "github.com/golang/protobuf/ptypes/any" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -82,20 +73,42 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // be used directly after any stripping needed for security/privacy reasons. type Status struct { // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. - Code int32 `protobuf:"varint,1,opt,name=code" json:"code,omitempty"` + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // A developer-facing error message, which should be in English. Any // user-facing error message should be localized and sent in the // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. - Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` // A list of messages that carry the error details. There is a common set of // message types for APIs to use. 
- Details []*google_protobuf.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"` + Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_status_c6e4de62dcdf2edf, []int{0} +} +func (m *Status) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Status.Unmarshal(m, b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Status.Marshal(b, m, deterministic) +} +func (dst *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(dst, src) +} +func (m *Status) XXX_Size() int { + return xxx_messageInfo_Status.Size(m) +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) } -func (m *Status) Reset() { *m = Status{} } -func (m *Status) String() string { return proto.CompactTextString(m) } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +var xxx_messageInfo_Status proto.InternalMessageInfo func (m *Status) GetCode() int32 { if m != nil { @@ -111,7 +124,7 @@ func (m *Status) GetMessage() string { return "" } -func (m *Status) GetDetails() []*google_protobuf.Any { +func (m *Status) GetDetails() []*any.Any { if m != nil { return m.Details } @@ -122,9 +135,9 @@ func init() { proto.RegisterType((*Status)(nil), "google.rpc.Status") } -func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor0) } +func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_status_c6e4de62dcdf2edf) } -var fileDescriptor0 = []byte{ +var fileDescriptor_status_c6e4de62dcdf2edf = []byte{ // 209 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28, diff --git a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go new file mode 100644 index 000000000..86886693f --- /dev/null +++ b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go @@ -0,0 +1,280 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/field_mask.proto + +package field_mask // import "google.golang.org/genproto/protobuf/field_mask" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. 
+// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// If a repeated field is specified for an update operation, new values will +// be appended to the existing repeated field in the target resource. Note that +// a repeated field is only allowed in the last position of a `paths` string. +// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then new value will be merged into the existing sub-message +// in the target resource. +// +// For example, given the target message: +// +// f { +// b { +// d: 1 +// x: 2 +// } +// c: [1] +// } +// +// And an update message: +// +// f { +// b { +// d: 10 +// } +// c: [2] +// } +// +// then if the field mask is: +// +// paths: ["f.b", "f.c"] +// +// then the result will be: +// +// f { +// b { +// d: 10 +// x: 2 +// } +// c: [1, 2] +// } +// +// An implementation may provide options to override this default behavior for +// repeated and message fields. +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). 
+// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). +// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. +// +// ## Field Mask Verification +// +// The implementation of any API method which has a FieldMask type field in the +// request should verify the included field paths, and return an +// `INVALID_ARGUMENT` error if any path is duplicated or unmappable. +type FieldMask struct { + // The set of field mask paths. 
+ Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldMask) Reset() { *m = FieldMask{} } +func (m *FieldMask) String() string { return proto.CompactTextString(m) } +func (*FieldMask) ProtoMessage() {} +func (*FieldMask) Descriptor() ([]byte, []int) { + return fileDescriptor_field_mask_02a8b0c0831edcce, []int{0} +} +func (m *FieldMask) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldMask.Unmarshal(m, b) +} +func (m *FieldMask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldMask.Marshal(b, m, deterministic) +} +func (dst *FieldMask) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldMask.Merge(dst, src) +} +func (m *FieldMask) XXX_Size() int { + return xxx_messageInfo_FieldMask.Size(m) +} +func (m *FieldMask) XXX_DiscardUnknown() { + xxx_messageInfo_FieldMask.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldMask proto.InternalMessageInfo + +func (m *FieldMask) GetPaths() []string { + if m != nil { + return m.Paths + } + return nil +} + +func init() { + proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask") +} + +func init() { + proto.RegisterFile("google/protobuf/field_mask.proto", fileDescriptor_field_mask_02a8b0c0831edcce) +} + +var fileDescriptor_field_mask_02a8b0c0831edcce = []byte{ + // 175 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd, + 0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x03, 0x8b, 0x09, 0xf1, 0x43, 0x54, 0xe8, 0xc1, 0x54, + 0x28, 0x29, 0x72, 0x71, 0xba, 0x81, 0x14, 0xf9, 0x26, 0x16, 0x67, 0x0b, 0x89, 0x70, 0xb1, 0x16, + 0x24, 0x96, 0x64, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x41, 0x38, 0x4e, 0x3d, 0x8c, + 0x5c, 0xc2, 0xc9, 0xf9, 0xb9, 0x7a, 0x68, 0x5a, 0x9d, 0xf8, 0xe0, 0x1a, 0x03, 0x40, 0x42, 0x01, + 0x8c, 0x51, 0x96, 0x50, 0x25, 0xe9, 0xf9, 0x39, 0x89, 0x79, 0xe9, 0x7a, 0xf9, 0x45, 0xe9, 0xfa, + 0xe9, 0xa9, 0x79, 0x60, 0x0d, 0xd8, 0xdc, 0x64, 0x8d, 0x60, 0xfe, 0x60, 0x64, 0x5c, 0xc4, 0xc4, + 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x48, 0x00, 0x54, 0x83, 0x5e, 0x78, 0x6a, + 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x48, 0x65, 0x41, 0x6a, 0x71, 0x12, 0x1b, 0xd8, 0x24, + 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfd, 0xda, 0xb7, 0xa8, 0xed, 0x00, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go new file mode 100644 index 000000000..f6d597a14 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go @@ -0,0 +1,173 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package oauth implements gRPC credentials using OAuth. 
+package oauth + +import ( + "fmt" + "io/ioutil" + "sync" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" + "google.golang.org/grpc/credentials" +) + +// TokenSource supplies PerRPCCredentials from an oauth2.TokenSource. +type TokenSource struct { + oauth2.TokenSource +} + +// GetRequestMetadata gets the request metadata as a map from a TokenSource. +func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + token, err := ts.Token() + if err != nil { + return nil, err + } + return map[string]string{ + "authorization": token.Type() + " " + token.AccessToken, + }, nil +} + +// RequireTransportSecurity indicates whether the credentials requires transport security. +func (ts TokenSource) RequireTransportSecurity() bool { + return true +} + +type jwtAccess struct { + jsonKey []byte +} + +// NewJWTAccessFromFile creates PerRPCCredentials from the given keyFile. +func NewJWTAccessFromFile(keyFile string) (credentials.PerRPCCredentials, error) { + jsonKey, err := ioutil.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) + } + return NewJWTAccessFromKey(jsonKey) +} + +// NewJWTAccessFromKey creates PerRPCCredentials from the given jsonKey. +func NewJWTAccessFromKey(jsonKey []byte) (credentials.PerRPCCredentials, error) { + return jwtAccess{jsonKey}, nil +} + +func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, uri[0]) + if err != nil { + return nil, err + } + token, err := ts.Token() + if err != nil { + return nil, err + } + return map[string]string{ + "authorization": token.Type() + " " + token.AccessToken, + }, nil +} + +func (j jwtAccess) RequireTransportSecurity() bool { + return true +} + +// oauthAccess supplies PerRPCCredentials from a given token. +type oauthAccess struct { + token oauth2.Token +} + +// NewOauthAccess constructs the PerRPCCredentials using a given token. +func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials { + return oauthAccess{token: *token} +} + +func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + return map[string]string{ + "authorization": oa.token.Type() + " " + oa.token.AccessToken, + }, nil +} + +func (oa oauthAccess) RequireTransportSecurity() bool { + return true +} + +// NewComputeEngine constructs the PerRPCCredentials that fetches access tokens from +// Google Compute Engine (GCE)'s metadata server. It is only valid to use this +// if your program is running on a GCE instance. +// TODO(dsymonds): Deprecate and remove this. +func NewComputeEngine() credentials.PerRPCCredentials { + return TokenSource{google.ComputeTokenSource("")} +} + +// serviceAccount represents PerRPCCredentials via JWT signing key. 
+type serviceAccount struct { + mu sync.Mutex + config *jwt.Config + t *oauth2.Token +} + +func (s *serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + s.mu.Lock() + defer s.mu.Unlock() + if !s.t.Valid() { + var err error + s.t, err = s.config.TokenSource(ctx).Token() + if err != nil { + return nil, err + } + } + return map[string]string{ + "authorization": s.t.Type() + " " + s.t.AccessToken, + }, nil +} + +func (s *serviceAccount) RequireTransportSecurity() bool { + return true +} + +// NewServiceAccountFromKey constructs the PerRPCCredentials using the JSON key slice +// from a Google Developers service account. +func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.PerRPCCredentials, error) { + config, err := google.JWTConfigFromJSON(jsonKey, scope...) + if err != nil { + return nil, err + } + return &serviceAccount{config: config}, nil +} + +// NewServiceAccountFromFile constructs the PerRPCCredentials using the JSON key file +// of a Google Developers service account. +func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.PerRPCCredentials, error) { + jsonKey, err := ioutil.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) + } + return NewServiceAccountFromKey(jsonKey, scope...) +} + +// NewApplicationDefault returns "Application Default Credentials". For more +// detail, see https://developers.google.com/accounts/docs/application-default-credentials. +func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.PerRPCCredentials, error) { + t, err := google.DefaultTokenSource(ctx, scope...) + if err != nil { + return nil, err + } + return TokenSource{t}, nil +}
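
Reviewer note, appended after the vendored files: a minimal sketch of how the newly vendored oauth credentials and google.iam.v1 stubs could be wired together by calling code. The endpoint, OAuth scope, and resource name below are illustrative assumptions rather than values taken from this change, and the snippet is a sketch of intended usage, not code added by this PR.

package main

import (
	"context"
	"crypto/tls"
	"log"

	iam "google.golang.org/genproto/googleapis/iam/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/oauth"
)

func main() {
	ctx := context.Background()

	// Application Default Credentials wrapped as per-RPC credentials.
	// oauth.NewApplicationDefault is defined in the vendored oauth package above;
	// the cloud-platform scope is an assumption for illustration.
	perRPC, err := oauth.NewApplicationDefault(ctx, "https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatalf("failed to build ADC credentials: %v", err)
	}

	// The vendored per-RPC credentials report RequireTransportSecurity() == true,
	// so the connection must also carry transport-level TLS credentials.
	conn, err := grpc.Dial(
		"cloudkms.googleapis.com:443", // illustrative endpoint, not part of this change
		grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{})),
		grpc.WithPerRPCCredentials(perRPC),
	)
	if err != nil {
		log.Fatalf("failed to dial: %v", err)
	}
	defer conn.Close()

	// NewIAMPolicyClient comes from the vendored iam_policy.pb.go above.
	client := iam.NewIAMPolicyClient(conn)

	// The resource name is a placeholder; the Resource field on
	// GetIamPolicyRequest is assumed here for illustration.
	policy, err := client.GetIamPolicy(ctx, &iam.GetIamPolicyRequest{
		Resource: "projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key",
	})
	if err != nil {
		log.Fatalf("GetIamPolicy failed: %v", err)
	}

	// Policy accessors (GetEtag, GetBindings) are generated in the vendored policy.pb.go above.
	log.Printf("policy etag: %x, bindings: %d", policy.GetEtag(), len(policy.GetBindings()))
}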